Dataset schema (columns and value ranges from the dataset-viewer header):

    column                    type     values
    code                      string   lengths 82 to 53.2k
    code_codestyle            int64    0 to 721
    style_context             string   lengths 91 to 41.9k
    style_context_codestyle   int64    0 to 699
    label                     int64    0 to 1

Each row below is printed field by field in that order: the code string, its
code_codestyle id, the style_context string, its style_context_codestyle id,
and the binary label.
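As a minimal sketch of how a dump with this schema is typically consumed,
assuming the columns come from a Hugging Face `datasets` table (the repository
id below is a hypothetical placeholder, not the actual source):

# Hypothetical example: load a dataset with the schema shown above.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder id
row = ds[0]
print(row["code"][:80])                # string, 82 to 53.2k characters
print(row["code_codestyle"])           # int64 in [0, 721]
print(row["style_context"][:80])       # string, 91 to 41.9k characters
print(row["style_context_codestyle"])  # int64 in [0, 699]
print(row["label"])                    # binary label in {0, 1}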
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class UpperCAmelCase ( snake_case__ ): _lowercase: str = DistilBertTokenizer _lowercase: Optional[int] = DistilBertTokenizerFast _lowercase: Optional[int] = True @slow def lowercase__ ( self : Union[str, Any] ) -> List[Any]: _lowerCAmelCase = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" ) _lowerCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCamelCase_ ) _lowerCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCamelCase_ ) _lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ) _lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
code_codestyle: 207
'''simple docstring''' from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a ( snake_case__ ): '''simple docstring''' __lowerCAmelCase : Optional[int] = """ClapFeatureExtractor""" __lowerCAmelCase : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]: super().__init__(lowerCamelCase_ , lowerCamelCase_ ) def __call__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ) -> List[Any]: _a : Any = kwargs.pop('sampling_rate' , lowerCamelCase_ ) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.' ) if text is not None: _a : Union[str, Any] = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ ) if audios is not None: _a : List[str] = self.feature_extractor( lowerCamelCase_ , sampling_rate=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ ) if text is not None and audios is not None: _a : List[Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ ) def __UpperCamelCase ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Tuple: return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ ) def __UpperCamelCase ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> List[str]: return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ ) @property def __UpperCamelCase ( self ) -> Tuple: _a : Dict = self.tokenizer.model_input_names _a : Tuple = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
style_context_codestyle: 120
label: 0
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() _snake_case : List[str] = logging.get_logger(__name__) _snake_case : Any = """The Nymphenburg Palace is a beautiful palace in Munich!""" def _a ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): _SCREAMING_SNAKE_CASE = { "attention_cell": "multi_head", "num_layers": 4, "units": 1024, "hidden_size": 768, "max_length": 512, "num_heads": 8, "scaled": True, "dropout": 0.1, "use_residual": True, "embed_size": 1024, "embed_dropout": 0.1, "word_embed": None, "layer_norm_eps": 1e-5, "token_type_vocab_size": 2, } _SCREAMING_SNAKE_CASE = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py _SCREAMING_SNAKE_CASE = BERTEncoder( attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=_SCREAMING_SNAKE_CASE , output_all_encodings=_SCREAMING_SNAKE_CASE , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , _SCREAMING_SNAKE_CASE ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later _SCREAMING_SNAKE_CASE = "openwebtext_ccnews_stories_books_cased" # Specify download folder to Gluonnlp's vocab _SCREAMING_SNAKE_CASE = os.path.join(get_home_dir() , "models" ) _SCREAMING_SNAKE_CASE = _load_vocab(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cls=_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = nlp.model.BERTModel( _SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=_SCREAMING_SNAKE_CASE , use_token_type_embed=_SCREAMING_SNAKE_CASE , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=_SCREAMING_SNAKE_CASE , use_decoder=_SCREAMING_SNAKE_CASE , ) original_bort.load_parameters(_SCREAMING_SNAKE_CASE , cast_dtype=_SCREAMING_SNAKE_CASE , ignore_extra=_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = original_bort._collect_params_with_prefix() # Build our config 🤗 _SCREAMING_SNAKE_CASE = { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": 
predefined_args["embed_size"], "initializer_range": 0.02, "intermediate_size": predefined_args["hidden_size"], "layer_norm_eps": predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], "model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(_SCREAMING_SNAKE_CASE ), } _SCREAMING_SNAKE_CASE = BertConfig.from_dict(_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = BertForMaskedLM(_SCREAMING_SNAKE_CASE ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(_SCREAMING_SNAKE_CASE : int ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ): _SCREAMING_SNAKE_CASE = hf_param.shape _SCREAMING_SNAKE_CASE = to_torch(params[gluon_param] ) _SCREAMING_SNAKE_CASE = gluon_param.shape assert ( shape_hf == shape_gluon ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param _SCREAMING_SNAKE_CASE = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" ) _SCREAMING_SNAKE_CASE = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" ) _SCREAMING_SNAKE_CASE = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" ) _SCREAMING_SNAKE_CASE = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) _SCREAMING_SNAKE_CASE = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): _SCREAMING_SNAKE_CASE = hf_bort_model.bert.encoder.layer[i] # self attention _SCREAMING_SNAKE_CASE = layer.attention.self _SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) _SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) _SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) _SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) _SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) _SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output _SCREAMING_SNAKE_CASE = layer.attention.output _SCREAMING_SNAKE_CASE = check_and_map_params( self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' ) _SCREAMING_SNAKE_CASE = check_and_map_params( self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' ) _SCREAMING_SNAKE_CASE = check_and_map_params( self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' ) _SCREAMING_SNAKE_CASE = check_and_map_params( self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate _SCREAMING_SNAKE_CASE = layer.intermediate _SCREAMING_SNAKE_CASE = check_and_map_params( intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) _SCREAMING_SNAKE_CASE = check_and_map_params( intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output _SCREAMING_SNAKE_CASE = layer.output _SCREAMING_SNAKE_CASE = check_and_map_params( bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) _SCREAMING_SNAKE_CASE = check_and_map_params( bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) _SCREAMING_SNAKE_CASE = check_and_map_params( bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) _SCREAMING_SNAKE_CASE = check_and_map_params( bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models _SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained("roberta-base" ) _SCREAMING_SNAKE_CASE = tokenizer.encode_plus(_SCREAMING_SNAKE_CASE )["input_ids"] # Get gluon output _SCREAMING_SNAKE_CASE = mx.nd.array([input_ids] ) _SCREAMING_SNAKE_CASE = original_bort(inputs=_SCREAMING_SNAKE_CASE , token_types=[] ) # Get Transformer output (save and reload model again) 
hf_bort_model.save_pretrained(_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = BertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) hf_bort_model.eval() _SCREAMING_SNAKE_CASE = tokenizer.encode_plus(_SCREAMING_SNAKE_CASE , return_tensors="pt" ) _SCREAMING_SNAKE_CASE = hf_bort_model(**_SCREAMING_SNAKE_CASE )[0] _SCREAMING_SNAKE_CASE = output_gluon[0].asnumpy() _SCREAMING_SNAKE_CASE = output_hf[0].detach().numpy() _SCREAMING_SNAKE_CASE = np.max(np.abs(hf_layer - gluon_layer ) ).item() _SCREAMING_SNAKE_CASE = np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) if success: print("✔️ Both model do output the same tensors" ) else: print("❌ Both model do **NOT** output the same tensors" ) print("Absolute difference is:" , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _snake_case : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _snake_case : str = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
code_codestyle: 493
'''simple docstring'''


def _a(_SCREAMING_SNAKE_CASE: list[int], _SCREAMING_SNAKE_CASE: list[int]):
    # Check if the input is valid
    if not len(_SCREAMING_SNAKE_CASE) == len(_SCREAMING_SNAKE_CASE) == 3:
        raise ValueError("Please enter a valid equation.")
    if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = equationa
    _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = equationa

    # Calculate the determinants of the matrices
    _SCREAMING_SNAKE_CASE = aa * ba - aa * ba
    _SCREAMING_SNAKE_CASE = ca * ba - ca * ba
    _SCREAMING_SNAKE_CASE = aa * ca - aa * ca

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            _SCREAMING_SNAKE_CASE = determinant_x / determinant
            _SCREAMING_SNAKE_CASE = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
style_context_codestyle: 493
label: 1
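The style_context in the row above is an obfuscated two-equation linear solver.
A readable restatement of the same Cramer's-rule computation (identifier names
here are illustrative, not taken from the row):

# Solve a1*x + b1*y = c1 and a2*x + b2*y = c2 by Cramer's rule.
def solve_2x2(eq1: list[float], eq2: list[float]) -> tuple[float, float]:
    a1, b1, c1 = eq1
    a2, b2, c2 = eq2
    det = a1 * b2 - a2 * b1      # coefficient determinant
    det_x = c1 * b2 - c2 * b1    # x-column replaced by the constants
    det_y = a1 * c2 - a2 * c1    # y-column replaced by the constants
    if det == 0:
        raise ValueError("No unique solution")
    return (det_x / det, det_y / det)

# 2x + 3y = 7 and x - y = 1  ->  x = 2, y = 1
assert solve_2x2([2, 3, 7], [1, -1, 1]) == (2.0, 1.0)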
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '''Hello world! cécé herlolip''' def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : bool ): __a : Union[str, Any] = FairseqRobertaModel.from_pretrained(lowerCamelCase_ ) roberta.eval() # disable dropout __a : Optional[int] = roberta.model.encoder.sentence_encoder __a : Any = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: __a : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our RoBERTa config:' , lowerCamelCase_ ) __a : Dict = XLMRobertaXLForSequenceClassification(lowerCamelCase_ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCamelCase_ ) model.eval() # Now let's copy all the weights. # Embeddings __a : Optional[int] = roberta_sent_encoder.embed_tokens.weight __a : Union[str, Any] = roberta_sent_encoder.embed_positions.weight __a : List[Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
__a : int = roberta_sent_encoder.layer_norm.weight __a : Union[str, Any] = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __a : BertLayer = model.roberta.encoder.layer[i] __a : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] __a : RobertaAttention = layer.attention __a : Any = roberta_layer.self_attn_layer_norm.weight __a : Any = roberta_layer.self_attn_layer_norm.bias # self attention __a : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) __a : int = roberta_layer.self_attn.q_proj.weight __a : Dict = roberta_layer.self_attn.q_proj.bias __a : List[Any] = roberta_layer.self_attn.k_proj.weight __a : Any = roberta_layer.self_attn.k_proj.bias __a : List[str] = roberta_layer.self_attn.v_proj.weight __a : Union[str, Any] = roberta_layer.self_attn.v_proj.bias # self-attention output __a : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape __a : int = roberta_layer.self_attn.out_proj.weight __a : Union[str, Any] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm __a : Tuple = roberta_layer.final_layer_norm.weight __a : Dict = roberta_layer.final_layer_norm.bias # intermediate __a : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape __a : Union[str, Any] = roberta_layer.fca.weight __a : Tuple = roberta_layer.fca.bias # output __a : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape __a : int = roberta_layer.fca.weight __a : Optional[Any] = roberta_layer.fca.bias # end of layer if classification_head: __a : Union[str, Any] = roberta.model.classification_heads['mnli'].dense.weight __a : Dict = roberta.model.classification_heads['mnli'].dense.bias __a : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight __a : Tuple = roberta.model.classification_heads['mnli'].out_proj.bias else: # LM Head __a : Optional[Any] = roberta.model.encoder.lm_head.dense.weight __a : Tuple = roberta.model.encoder.lm_head.dense.bias __a : Dict = roberta.model.encoder.lm_head.layer_norm.weight __a : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.bias __a : Union[str, Any] = roberta.model.encoder.lm_head.weight __a : Optional[Any] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. __a : torch.Tensor = roberta.encode(lowerCamelCase_ ).unsqueeze(0 ) # batch of size 1 __a : Tuple = model(lowerCamelCase_ )[0] if classification_head: __a : Union[str, Any] = roberta.model.classification_heads['mnli'](roberta.extract_features(lowerCamelCase_ ) ) else: __a : int = roberta.model(lowerCamelCase_ )[0] print(our_output.shape , their_output.shape ) __a : Any = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 __a : Union[str, Any] = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) print('Do both models output the same tensors?' 
, '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) pathlib.Path(lowerCamelCase_ ).mkdir(parents=lowerCamelCase_ , exist_ok=lowerCamelCase_ ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
code_codestyle: 47
import logging

from transformers import PretrainedConfig

_snake_case = logging.getLogger(__name__)
_snake_case = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}


class _lowerCAmelCase(__magic_name__):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_: int = "bertabs"

    def __init__(
        self: Optional[int],
        SCREAMING_SNAKE_CASE__: Union[str, Any] = 3_05_22,
        SCREAMING_SNAKE_CASE__: Union[str, Any] = 5_12,
        SCREAMING_SNAKE_CASE__: List[Any] = 6,
        SCREAMING_SNAKE_CASE__: List[str] = 5_12,
        SCREAMING_SNAKE_CASE__: Union[str, Any] = 8,
        SCREAMING_SNAKE_CASE__: List[str] = 5_12,
        SCREAMING_SNAKE_CASE__: List[Any] = 0.2,
        SCREAMING_SNAKE_CASE__: Tuple = 6,
        SCREAMING_SNAKE_CASE__: Any = 7_68,
        SCREAMING_SNAKE_CASE__: str = 8,
        SCREAMING_SNAKE_CASE__: Dict = 20_48,
        SCREAMING_SNAKE_CASE__: List[str] = 0.2,
        **SCREAMING_SNAKE_CASE__: List[str],
    ):
        """simple docstring"""
        super().__init__(**SCREAMING_SNAKE_CASE__)
        UpperCamelCase = vocab_size
        UpperCamelCase = max_pos
        UpperCamelCase = enc_layers
        UpperCamelCase = enc_hidden_size
        UpperCamelCase = enc_heads
        UpperCamelCase = enc_ff_size
        UpperCamelCase = enc_dropout
        UpperCamelCase = dec_layers
        UpperCamelCase = dec_hidden_size
        UpperCamelCase = dec_heads
        UpperCamelCase = dec_ff_size
        UpperCamelCase = dec_dropout
style_context_codestyle: 282
label: 0
'''simple docstring'''


def a() -> int:
    '''simple docstring'''
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(__a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(F"""{solution() = }""")
code_codestyle: 280
'''simple docstring''' import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowercase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = '''hf-internal-testing/tiny-random-t5''' UpperCamelCase__ :int = AutoTokenizer.from_pretrained(UpperCamelCase_ ) UpperCamelCase__ :int = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ ) UpperCamelCase__ :int = tokenizer('''This is me''' , return_tensors='''pt''' ) UpperCamelCase__ :Optional[int] = model.to_bettertransformer() self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) UpperCamelCase__ :int = model.generate(**UpperCamelCase_ ) UpperCamelCase__ :Optional[Any] = model.reverse_bettertransformer() self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase_ ) UpperCamelCase__ :Tuple = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ ) self.assertFalse( any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) UpperCamelCase__ :Tuple = model_reloaded.generate(**UpperCamelCase_ ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = '''hf-internal-testing/tiny-random-t5''' UpperCamelCase__ :Dict = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ ) UpperCamelCase__ :Union[str, Any] = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase_ ): model.save_pretrained(UpperCamelCase_ ) UpperCamelCase__ :Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase_ )
style_context_codestyle: 280
label: 1
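The code field of the row above brute-forces Project Euler problem 9 (the
Pythagorean triple with a + b + c = 1000). For reference, the unique answer is
(200, 375, 425), so the returned product is 31875000; a quick check:

# Verify the special Pythagorean triplet the brute force above returns.
a, b, c = 200, 375, 425
assert a + b + c == 1000
assert a * a + b * b == c * c  # 40000 + 140625 == 180625
print(a * b * c)               # 31875000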
import doctest from collections import deque import numpy as np class A__ : def __init__( self ) -> None: """simple docstring""" __magic_name__ : List[str] = [2, 1, 2, -1] __magic_name__ : List[Any] = [1, 2, 3, 4] def lowercase ( self ) -> list[float]: """simple docstring""" __magic_name__ : Optional[Any] = len(self.first_signal ) __magic_name__ : Any = len(self.second_signal ) __magic_name__ : Optional[Any] = max(lowerCamelCase , lowerCamelCase ) # create a zero matrix of max_length x max_length __magic_name__ : int = [[0] * max_length for i in range(lowerCamelCase )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(lowerCamelCase ): __magic_name__ : str = deque(self.second_signal ) rotated_signal.rotate(lowerCamelCase ) for j, item in enumerate(lowerCamelCase ): matrix[i][j] += item # multiply the matrix with the first signal __magic_name__ : Tuple = np.matmul(np.transpose(lowerCamelCase ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(lowerCamelCase , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
code_codestyle: 154
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging lowercase_ = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class A__ ( __SCREAMING_SNAKE_CASE ): def __init__( self , lowerCamelCase = 101 ) -> Any: """simple docstring""" __magic_name__ : List[str] = length def __len__( self ) -> Union[str, Any]: """simple docstring""" return self.length def __getitem__( self , lowerCamelCase ) -> int: """simple docstring""" return i class A__ : def __call__( self , lowerCamelCase ) -> Optional[Any]: """simple docstring""" return {"input_ids": torch.tensor(lowerCamelCase ), "labels": torch.tensor(lowerCamelCase )} class A__ ( nn.Module ): def __init__( self ) -> Tuple: """simple docstring""" super().__init__() # Add some (unused) params otherwise DDP will complain. __magic_name__ : Tuple = nn.Linear(120 , 80 ) def lowercase ( self , lowerCamelCase , lowerCamelCase=None ) -> List[str]: """simple docstring""" if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class A__ ( __SCREAMING_SNAKE_CASE ): @require_torch_neuroncore def lowercase ( self ) -> List[Any]: """simple docstring""" __magic_name__ : Union[str, Any] = F'''--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() __magic_name__ : Dict = self.get_auto_remove_tmp_dir() __magic_name__ : str = F'''--output_dir {output_dir}'''.split() __magic_name__ : Optional[Any] = ['''torchrun'''] + distributed_args + args execute_subprocess_async(lowerCamelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class A__ ( __SCREAMING_SNAKE_CASE ): @require_torch_multi_gpu def lowercase ( self ) -> str: """simple docstring""" __magic_name__ : Optional[Any] = F'''--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() __magic_name__ : str = self.get_auto_remove_tmp_dir() __magic_name__ : List[str] = F'''--output_dir {output_dir}'''.split() __magic_name__ : Tuple = ['''torchrun'''] + distributed_args + args execute_subprocess_async(lowerCamelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py lowercase_ = HfArgumentParser((TrainingArguments,)) lowercase_ = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: lowercase_ = DummyDataset(dataset_length) def lowerCAmelCase ( UpperCAmelCase ) ->Dict: """simple docstring""" __magic_name__ : str = list(range(len(UpperCAmelCase ) ) ) __magic_name__ : List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( '''Predictions and/or labels do not match expected results:\n - predictions: ''' F'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' ) return {"success": success} lowercase_ = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) lowercase_ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) lowercase_ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) lowercase_ = 2 lowercase_ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) lowercase_ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) lowercase_ = None
style_context_codestyle: 154
label: 1
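The code field of the row above computes the circular convolution of its two
hard-coded signals via a rotation matrix. A standard cross-check uses the DFT
convolution theorem; for [2, 1, 2, -1] and [1, 2, 3, 4] the expected output is
[10, 10, 6, 14]:

# Circular convolution via the DFT convolution theorem.
import numpy as np

first = np.array([2, 1, 2, -1])
second = np.array([1, 2, 3, 4])
circular = np.real(np.fft.ifft(np.fft.fft(first) * np.fft.fft(second)))
print(np.round(circular, 2))  # [10. 10.  6. 14.]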
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def UpperCAmelCase_ ( lowercase__ ): '''simple docstring''' a_ =[ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( lowercase__ ): '''simple docstring''' a_ , a_ =emb.weight.shape a_ =nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ ) a_ =emb.weight.data return lin_layer def UpperCAmelCase_ ( lowercase__ , lowercase__="facebook/mbart-large-en-ro" , lowercase__=False , lowercase__=False ): '''simple docstring''' a_ =torch.load(lowercase__ , map_location="cpu" )["model"] remove_ignore_keys_(lowercase__ ) a_ =state_dict["encoder.embed_tokens.weight"].shape[0] a_ =MBartConfig.from_pretrained(lowercase__ , vocab_size=lowercase__ ) if mbart_aa and finetuned: a_ ="relu" a_ =state_dict["decoder.embed_tokens.weight"] a_ =MBartForConditionalGeneration(lowercase__ ) model.model.load_state_dict(lowercase__ ) if finetuned: a_ =make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowercase = parser.parse_args() lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
code_codestyle: 41
'''simple docstring'''

from __future__ import annotations


def UpperCAmelCase_(lowercase__, lowercase__):
    '''simple docstring'''
    print(F"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(lowercase__):
        print(F"""{i}\t\t{d}""")


def UpperCAmelCase_(lowercase__, lowercase__, lowercase__):
    '''simple docstring'''
    for j in range(lowercase__):
        a_, a_, a_ = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def UpperCAmelCase_(lowercase__, lowercase__, lowercase__, lowercase__):
    '''simple docstring'''
    a_ = [float("inf")] * vertex_count
    a_ = 0.0
    for _ in range(vertex_count - 1):
        for j in range(lowercase__):
            a_, a_, a_ = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                a_ = distance[u] + w
    a_ = check_negative_cycle(lowercase__, lowercase__, lowercase__)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lowercase = int(input('''Enter number of vertices: ''').strip())
    lowercase = int(input('''Enter number of edges: ''').strip())
    lowercase = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        lowercase, lowercase, lowercase = (
            int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
    lowercase = int(input('''\nEnter shortest path source:''').strip())
    lowercase = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
style_context_codestyle: 41
label: 1
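The style_context above runs Bellman-Ford over an edge list of
{"src", "dst", "weight"} dicts read from stdin. A small self-contained run in
the same data layout (readable names are mine, not from the sample):

# Minimal Bellman-Ford over an edge list of {"src", "dst", "weight"} dicts.
def bellman_ford(graph, vertex_count, edge_count, src):
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):  # relax every edge V - 1 times
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    return distance

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
print(bellman_ford(edges, 3, 3, 0))  # [0.0, 3.0, 1.0]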
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowerCAmelCase ( snake_case__ , unittest.TestCase ): '''simple docstring''' A = BertTokenizer A = BertTokenizerFast A = True A = True A = filter_non_english def lowerCamelCase__ ( self :Tuple ) -> List[str]: """simple docstring""" super().setUp() UpperCamelCase__ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def lowerCamelCase__ ( self :str , lowerCamelCase_ :Optional[Any] ) -> Any: """simple docstring""" UpperCamelCase__ = "UNwant\u00E9d,running" UpperCamelCase__ = "unwanted, running" return input_text, output_text def lowerCamelCase__ ( self :Dict ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ = self.tokenizer_class(self.vocab_file ) UpperCamelCase__ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowerCamelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def lowerCamelCase__ ( self :List[str] ) -> Optional[int]: """simple docstring""" if not self.test_rust_tokenizer: return UpperCamelCase__ = self.get_tokenizer() UpperCamelCase__ = self.get_rust_tokenizer() UpperCamelCase__ = "UNwant\u00E9d,running" UpperCamelCase__ = tokenizer.tokenize(lowerCamelCase_ ) UpperCamelCase__ = rust_tokenizer.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) UpperCamelCase__ = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase__ = self.get_rust_tokenizer() UpperCamelCase__ = tokenizer.encode(lowerCamelCase_ ) UpperCamelCase__ = rust_tokenizer.encode(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) # With lower casing UpperCamelCase__ = self.get_tokenizer(do_lower_case=lowerCamelCase_ ) UpperCamelCase__ = self.get_rust_tokenizer(do_lower_case=lowerCamelCase_ ) UpperCamelCase__ = "UNwant\u00E9d,running" UpperCamelCase__ = tokenizer.tokenize(lowerCamelCase_ ) UpperCamelCase__ = rust_tokenizer.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) UpperCamelCase__ = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase__ = self.get_rust_tokenizer() UpperCamelCase__ = tokenizer.encode(lowerCamelCase_ ) UpperCamelCase__ = rust_tokenizer.encode(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase__ ( self :str ) -> List[Any]: """simple docstring""" UpperCamelCase__ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", 
"\u535A", "\u63A8", "zz"] ) def lowerCamelCase__ ( self :Dict ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def lowerCamelCase__ ( self :str ) -> int: """simple docstring""" UpperCamelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def lowerCamelCase__ ( self :int ) -> Any: """simple docstring""" UpperCamelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def lowerCamelCase__ ( self :Dict ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def lowerCamelCase__ ( self :List[str] ) -> Any: """simple docstring""" UpperCamelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def lowerCamelCase__ ( self :List[Any] ) -> Optional[int]: """simple docstring""" UpperCamelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def lowerCamelCase__ ( self :Optional[Any] ) -> Any: """simple docstring""" UpperCamelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def lowerCamelCase__ ( self :int ) -> int: """simple docstring""" UpperCamelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def lowerCamelCase__ ( self :Union[str, Any] ) -> Optional[int]: """simple docstring""" UpperCamelCase__ = BasicTokenizer() UpperCamelCase__ = "a\n'll !!to?'d of, can't." 
UpperCamelCase__ = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(lowerCamelCase_ ) , lowerCamelCase_ ) def lowerCamelCase__ ( self :Optional[Any] ) -> Dict: """simple docstring""" UpperCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] UpperCamelCase__ = {} for i, token in enumerate(lowerCamelCase_ ): UpperCamelCase__ = i UpperCamelCase__ = WordpieceTokenizer(vocab=lowerCamelCase_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def lowerCamelCase__ ( self :Tuple ) -> List[Any]: """simple docstring""" self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def lowerCamelCase__ ( self :List[str] ) -> List[Any]: """simple docstring""" self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def lowerCamelCase__ ( self :Optional[int] ) -> List[str]: """simple docstring""" self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def lowerCamelCase__ ( self :Tuple ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ = self.get_tokenizer() UpperCamelCase__ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCamelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCamelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def lowerCamelCase__ ( self :List[Any] ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ = self.tokenizer_class.from_pretrained("bert-base-uncased" ) UpperCamelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase_ ) UpperCamelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def lowerCamelCase__ ( self :Tuple ) -> int: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) UpperCamelCase__ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' 
UpperCamelCase__ = tokenizer_r.encode_plus( lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , ) UpperCamelCase__ = tokenizer_r.do_lower_case if hasattr(lowerCamelCase_ , "do_lower_case" ) else False UpperCamelCase__ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "Allen"), ((2_1, 2_3), "##NL"), ((2_3, 2_4), "##P"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "allen"), ((2_1, 2_3), "##nl"), ((2_3, 2_4), "##p"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def lowerCamelCase__ ( self :List[Any] ) -> Tuple: """simple docstring""" UpperCamelCase__ = ["的", "人", "有"] UpperCamelCase__ = "".join(lowerCamelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): UpperCamelCase__ = True UpperCamelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) UpperCamelCase__ = tokenizer_p.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) UpperCamelCase__ = tokenizer_r.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ ) UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase__ = False UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) UpperCamelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) UpperCamelCase__ = tokenizer_r.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) UpperCamelCase__ = tokenizer_p.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ ) UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". UpperCamelCase__ = [ f'##{token}' if idx != 0 else token for idx, token in enumerate(lowerCamelCase_ ) ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
code_codestyle: 516
"""simple docstring""" from __future__ import annotations import time A : List[str] = list[tuple[int, int]] A : Tuple = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] A : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class lowerCAmelCase : '''simple docstring''' def __init__( self :Any , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Node | None ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ = pos_x UpperCamelCase__ = pos_y UpperCamelCase__ = (pos_y, pos_x) UpperCamelCase__ = goal_x UpperCamelCase__ = goal_y UpperCamelCase__ = parent class lowerCAmelCase : '''simple docstring''' def __init__( self :int , lowerCamelCase_ :tuple[int, int] , lowerCamelCase_ :tuple[int, int] ) -> List[Any]: """simple docstring""" UpperCamelCase__ = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCamelCase_ ) UpperCamelCase__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCamelCase_ ) UpperCamelCase__ = [self.start] UpperCamelCase__ = False def lowerCamelCase__ ( self :Any ) -> Path | None: """simple docstring""" while self.node_queue: UpperCamelCase__ = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: UpperCamelCase__ = True return self.retrace_path(lowerCamelCase_ ) UpperCamelCase__ = self.get_successors(lowerCamelCase_ ) for node in successors: self.node_queue.append(lowerCamelCase_ ) if not self.reached: return [self.start.pos] return None def lowerCamelCase__ ( self :str , lowerCamelCase_ :Node ) -> list[Node]: """simple docstring""" UpperCamelCase__ = [] for action in delta: UpperCamelCase__ = parent.pos_x + action[1] UpperCamelCase__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , lowerCamelCase_ ) ) return successors def lowerCamelCase__ ( self :Any , lowerCamelCase_ :Node | None ) -> Path: """simple docstring""" UpperCamelCase__ = node UpperCamelCase__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCamelCase__ = current_node.parent path.reverse() return path class lowerCAmelCase : '''simple docstring''' def __init__( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Union[str, Any] ) -> int: """simple docstring""" UpperCamelCase__ = BreadthFirstSearch(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase__ = BreadthFirstSearch(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase__ = False def lowerCamelCase__ ( self :int ) -> Path | None: """simple docstring""" while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: UpperCamelCase__ = self.fwd_bfs.node_queue.pop(0 ) UpperCamelCase__ = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: UpperCamelCase__ = True return self.retrace_bidirectional_path( lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase__ = current_bwd_node UpperCamelCase__ = current_fwd_node UpperCamelCase__ = { self.fwd_bfs: self.fwd_bfs.get_successors(lowerCamelCase_ ), self.bwd_bfs: self.bwd_bfs.get_successors(lowerCamelCase_ ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(lowerCamelCase_ ) if not self.reached: return [self.fwd_bfs.start.pos] return None def 
lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :Node , lowerCamelCase_ :Node ) -> Path: """simple docstring""" UpperCamelCase__ = self.fwd_bfs.retrace_path(lowerCamelCase_ ) UpperCamelCase__ = self.bwd_bfs.retrace_path(lowerCamelCase_ ) bwd_path.pop() bwd_path.reverse() UpperCamelCase__ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() A : str = (0, 0) A : Any = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) A : Any = time.time() A : Optional[int] = BreadthFirstSearch(init, goal) A : List[str] = bfs.search() A : Dict = time.time() - start_bfs_time print('Unidirectional BFS computation time : ', bfs_time) A : Optional[int] = time.time() A : Any = BidirectionalBreadthFirstSearch(init, goal) A : List[Any] = bd_bfs.search() A : Dict = time.time() - start_bd_bfs_time print('Bidirectional BFS computation time : ', bd_bfs_time)
style_context_codestyle: 516
label: 1
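The style_context in the row above implements unidirectional and bidirectional
BFS over a 0/1 obstacle grid. A compact sketch of the core grid BFS it builds
on (names here are illustrative):

# Grid BFS: 0 = free cell, 1 = obstacle; moves are up, left, down, right.
from collections import deque

def grid_bfs(grid, start, goal):
    queue, seen = deque([(start, [start])]), {start}
    while queue:
        (y, x), path = queue.popleft()
        if (y, x) == goal:
            return path
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = y + dy, x + dx
            if (0 <= ny < len(grid) and 0 <= nx < len(grid[0])
                    and grid[ny][nx] == 0 and (ny, nx) not in seen):
                seen.add((ny, nx))
                queue.append(((ny, nx), path + [(ny, nx)]))
    return None

print(grid_bfs([[0, 0], [1, 0]], (0, 0), (1, 1)))  # [(0, 0), (0, 1), (1, 1)]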
'''simple docstring''' import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def snake_case_ (_a : str , _a : int ): assert isinstance(_a , _a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def snake_case_ (_a : List[str] , _a : Tuple , _a : Optional[Any] ): UpperCAmelCase = tmp_path / '''cache''' UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase = ParquetDatasetReader(_a , cache_dir=_a , keep_in_memory=_a ).read() _check_parquet_dataset(_a , _a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def snake_case_ (_a : Tuple , _a : int , _a : Dict ): UpperCAmelCase = tmp_path / '''cache''' UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase = features.copy() if features else default_expected_features UpperCAmelCase = ( Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase = ParquetDatasetReader(_a , features=_a , cache_dir=_a ).read() _check_parquet_dataset(_a , _a ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def snake_case_ (_a : Any , _a : int , _a : Dict ): UpperCAmelCase = tmp_path / '''cache''' UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase = ParquetDatasetReader(_a , cache_dir=_a , split=_a ).read() _check_parquet_dataset(_a , _a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def snake_case_ (_a : Dict , _a : Optional[Any] , _a : str ): if issubclass(_a , _a ): UpperCAmelCase = parquet_path elif issubclass(_a , _a ): UpperCAmelCase = [parquet_path] UpperCAmelCase = tmp_path / '''cache''' UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase = ParquetDatasetReader(_a , cache_dir=_a ).read() _check_parquet_dataset(_a , _a ) def snake_case_ (_a : Any , _a : Union[str, Any] , _a : Tuple=("train",) ): assert isinstance(_a , _a ) for split in splits: UpperCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def snake_case_ (_a : Optional[Any] , _a : Optional[int] , _a : Dict ): UpperCAmelCase = 
tmp_path / '''cache''' UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase = ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=_a , keep_in_memory=_a ).read() _check_parquet_datasetdict(_a , _a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def snake_case_ (_a : Tuple , _a : List[str] , _a : Dict ): UpperCAmelCase = tmp_path / '''cache''' UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase = features.copy() if features else default_expected_features UpperCAmelCase = ( Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase = ParquetDatasetReader({'''train''': parquet_path} , features=_a , cache_dir=_a ).read() _check_parquet_datasetdict(_a , _a ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def snake_case_ (_a : List[Any] , _a : Dict , _a : List[str] ): if split: UpperCAmelCase = {split: parquet_path} else: UpperCAmelCase = '''train''' UpperCAmelCase = {'''train''': parquet_path, '''test''': parquet_path} UpperCAmelCase = tmp_path / '''cache''' UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase = ParquetDatasetReader(_a , cache_dir=_a ).read() _check_parquet_datasetdict(_a , _a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def snake_case_ (_a : Union[str, Any] , _a : int ): UpperCAmelCase = ParquetDatasetWriter(_a , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 UpperCAmelCase = pq.ParquetFile(tmp_path / '''foo.parquet''' ) UpperCAmelCase = pf.read() assert dataset.data.table == output_table def snake_case_ (_a : Any , _a : Optional[Any] ): UpperCAmelCase = str(shared_datadir / '''test_image_rgb.jpg''' ) UpperCAmelCase = {'''image''': [image_path]} UpperCAmelCase = Features({'''image''': Image()} ) UpperCAmelCase = Dataset.from_dict(_a , features=_a ) UpperCAmelCase = ParquetDatasetWriter(_a , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 UpperCAmelCase = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features UpperCAmelCase = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=_a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def snake_case_ (_a : str , _a : Optional[int] ): assert get_writer_batch_size(_a ) == expected
358
'''simple docstring''' import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _a : def __init__( self : List[str] , lowercase : Dict , lowercase : List[Any]=13 , lowercase : Optional[Any]=7 , lowercase : Any=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : str=True , lowercase : List[str]=99 , lowercase : int=64 , lowercase : List[Any]=32 , lowercase : str=5 , lowercase : Optional[int]=4 , lowercase : int=37 , lowercase : str="gelu" , lowercase : Any=0.1 , lowercase : Optional[Any]=0.1 , lowercase : Optional[int]=512 , lowercase : Union[str, Any]=16 , lowercase : List[str]=2 , lowercase : Tuple=0.02 , lowercase : List[Any]=3 , lowercase : int=4 , lowercase : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = embedding_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Optional[Any] ): '''simple docstring''' return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def A ( self : Tuple , lowercase : Optional[Any] , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : str , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = MobileBertModel(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase ) UpperCAmelCase = model(lowercase , token_type_ids=lowercase ) UpperCAmelCase = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : Dict , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : List[str] , lowercase : Dict , lowercase : List[str] , lowercase : Optional[int] , lowercase : List[str] ): '''simple docstring''' UpperCAmelCase = MobileBertForMaskedLM(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Dict , lowercase : Any , lowercase : Dict , lowercase : Optional[Any] , lowercase : Optional[Any] , lowercase : List[Any] , lowercase : List[Any] , lowercase : List[str] ): '''simple docstring''' UpperCAmelCase = MobileBertForNextSentencePrediction(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int , lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Tuple ): '''simple docstring''' UpperCAmelCase = MobileBertForPreTraining(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : str , lowercase : Optional[Any] , lowercase : List[str] , lowercase : int , lowercase : Any ): '''simple docstring''' UpperCAmelCase = MobileBertForQuestionAnswering(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Optional[Any] , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : List[str] , lowercase : List[str] , lowercase : str , lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.num_labels UpperCAmelCase = MobileBertForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , 
attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : List[Any] , lowercase : int , lowercase : List[Any] , lowercase : Tuple , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[str] , lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.num_labels UpperCAmelCase = MobileBertForTokenClassification(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : int , lowercase : Optional[int] , lowercase : Tuple , lowercase : List[Any] , lowercase : Tuple , lowercase : str , lowercase : Tuple , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = self.num_choices UpperCAmelCase = MobileBertForMultipleChoice(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _a ( __a , __a , unittest.TestCase ): __a : Dict = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) __a : List[str] = ( { """feature-extraction""": MobileBertModel, """fill-mask""": MobileBertForMaskedLM, """question-answering""": MobileBertForQuestionAnswering, """text-classification""": MobileBertForSequenceClassification, """token-classification""": MobileBertForTokenClassification, """zero-shot""": MobileBertForSequenceClassification, } if is_torch_available() else {} ) __a : Any = True def A ( self : List[Any] , lowercase : int , lowercase : Any , lowercase : Optional[Any]=False ): '''simple docstring''' UpperCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) if return_labels: if model_class in get_values(lowercase ): UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase ) UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase ) return inputs_dict def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = MobileBertModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def A ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def A ( self : Any ): '''simple docstring''' UpperCAmelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase ) def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase ) def snake_case_ (_a : List[Any] ): return torch.tensor( _a , dtype=torch.long , device=_a , ) A =1E-3 @require_torch @require_sentencepiece @require_tokenizers class _a ( unittest.TestCase ): @slow def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(lowercase ) UpperCAmelCase = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] ) with torch.no_grad(): UpperCAmelCase = model(lowercase )[0] UpperCAmelCase = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , lowercase ) UpperCAmelCase = torch.tensor( [ [ [-2.4_736_526E07, 8.2_691_656E04, 1.6_521_838E05], [-5.7_541_704E-01, 3.9_056_022E00, 4.4_011_507E00], [2.6_047_359E00, 1.5_677_652E00, -1.7_324_188E-01], ] ] , device=lowercase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
358
1
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
494
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
248
0
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
717
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
131
0
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
517
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
    'tokenization_canine': ['CanineTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_canine'] = [
        'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CanineForMultipleChoice',
        'CanineForQuestionAnswering',
        'CanineForSequenceClassification',
        'CanineForTokenClassification',
        'CanineLayer',
        'CanineModel',
        'CaninePreTrainedModel',
        'load_tf_weights_in_canine',
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
648
0
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class CNNDMDataset(Dataset):
    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
460
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
460
1
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class snake_case : def __init__( self : int , a_ : int , a_ : Union[str, Any]=13 , a_ : List[str]=7 , a_ : int=True , a_ : List[str]=True , a_ : List[Any]=False , a_ : Optional[Any]=True , a_ : Optional[Any]=99 , a_ : List[Any]=64 , a_ : Optional[int]=5 , a_ : str=4 , a_ : List[Any]=64 , a_ : int="gelu" , a_ : List[str]=0.1 , a_ : List[Any]=0.1 , a_ : Optional[Any]=512 , a_ : Union[str, Any]=16 , a_ : int=2 , a_ : Dict=0.02 , a_ : Optional[Any]=3 , a_ : List[Any]=4 , a_ : Optional[int]=None , )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = parent SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE__ : Optional[Any] = seq_length SCREAMING_SNAKE_CASE__ : int = is_training SCREAMING_SNAKE_CASE__ : int = use_input_mask SCREAMING_SNAKE_CASE__ : str = use_token_type_ids SCREAMING_SNAKE_CASE__ : int = use_labels SCREAMING_SNAKE_CASE__ : Tuple = vocab_size SCREAMING_SNAKE_CASE__ : Any = hidden_size SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Tuple = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings SCREAMING_SNAKE_CASE__ : Any = type_vocab_size SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_labels SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_choices SCREAMING_SNAKE_CASE__ : List[Any] = scope def __lowercase( self : Dict )-> List[str]: """simple docstring""" return MPNetConfig.from_pretrained('microsoft/mpnet-base' ) def __lowercase( self : Any )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Dict = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : List[Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Any = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase( self : Optional[int] )-> Dict: """simple docstring""" return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def __lowercase( self : int , a_ : Any , a_ : Dict , a_ : Dict , a_ : Optional[Any] , a_ : List[Any] , a_ : Optional[int] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = MPNetModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Dict = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowercase( self : Tuple , a_ : int , a_ : Union[str, Any] , a_ : List[str] , a_ : Optional[Any] , a_ : Tuple , a_ : int )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = MPNetForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowercase( self : str , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Dict , a_ : int , a_ : List[Any] , a_ : List[str] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE__ : int = MPNetForSequenceClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase( self : Optional[Any] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Dict , a_ : Optional[int] , a_ : Union[str, Any] , a_ : int )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_choices SCREAMING_SNAKE_CASE__ : Any = MPNetForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Tuple = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowercase( self : int , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : int , a_ : List[str] , a_ : Any )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels SCREAMING_SNAKE_CASE__ : Any = MPNetForTokenClassification(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_config_and_inputs() ((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : Dict = config_and_inputs SCREAMING_SNAKE_CASE__ : str = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( MPNetForMaskedLM, 
MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) lowercase_ = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) lowercase_ = False lowercase_ = True def __lowercase( self : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = MPNetModelTester(self ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def __lowercase( self : Optional[int] )-> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowercase( self : int )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*a_ ) def __lowercase( self : Dict )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*a_ ) def __lowercase( self : str )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*a_ ) def __lowercase( self : List[str] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*a_ ) def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*a_ ) @require_torch class snake_case ( unittest.TestCase ): @slow def __lowercase( self : str )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = MPNetModel.from_pretrained('microsoft/mpnet-base' ) SCREAMING_SNAKE_CASE__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) SCREAMING_SNAKE_CASE__ : Tuple = model(a_ )[0] SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , a_ ) SCREAMING_SNAKE_CASE__ : Dict = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
85
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
291
0
'''simple docstring''' # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class __lowerCAmelCase : snake_case : CommonSchedulerState # setable values snake_case : jnp.ndarray snake_case : jnp.ndarray snake_case : Optional[int] = None @classmethod def snake_case_ (cls , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): return cls(common=lowerCAmelCase__ , init_noise_sigma=lowerCAmelCase__ , timesteps=lowerCAmelCase__ ) @dataclass class __lowerCAmelCase ( __a ): snake_case : DDPMSchedulerState class __lowerCAmelCase ( __a , __a ): snake_case : int = [e.name for e in FlaxKarrasDiffusionSchedulers] snake_case : jnp.dtype @property def snake_case_ (self ): return True @register_to_config def __init__(self , lowerCAmelCase__ = 1_0_0_0 , lowerCAmelCase__ = 0.0_0_0_1 , lowerCAmelCase__ = 0.0_2 , lowerCAmelCase__ = "linear" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fixed_small" , lowerCAmelCase__ = True , lowerCAmelCase__ = "epsilon" , lowerCAmelCase__ = jnp.floataa , ): _UpperCAmelCase : Tuple = dtype def snake_case_ (self , lowerCAmelCase__ = None ): if common is None: _UpperCAmelCase : Union[str, Any] = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution _UpperCAmelCase : Optional[int] = jnp.array(1.0 , dtype=self.dtype ) _UpperCAmelCase : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=lowerCAmelCase__ , init_noise_sigma=lowerCAmelCase__ , timesteps=lowerCAmelCase__ , ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None ): return sample def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = () ): _UpperCAmelCase : List[str] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 _UpperCAmelCase : List[Any] = (jnp.arange(0 , lowerCAmelCase__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=lowerCAmelCase__ , timesteps=lowerCAmelCase__ , ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ): _UpperCAmelCase : Optional[int] = state.common.alphas_cumprod[t] _UpperCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _UpperCAmelCase : Optional[Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: _UpperCAmelCase : Any = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": _UpperCAmelCase : str = jnp.clip(lowerCAmelCase__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": _UpperCAmelCase : Tuple = jnp.log(jnp.clip(lowerCAmelCase__ , a_min=1e-20 ) ) elif 
variance_type == "fixed_large": _UpperCAmelCase : str = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log _UpperCAmelCase : List[Any] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": _UpperCAmelCase : Optional[int] = variance _UpperCAmelCase : Optional[Any] = state.common.betas[t] _UpperCAmelCase : Optional[Any] = (predicted_variance + 1) / 2 _UpperCAmelCase : Optional[int] = frac * max_log + (1 - frac) * min_log return variance def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = True , ): _UpperCAmelCase : Union[str, Any] = timestep if key is None: _UpperCAmelCase : List[str] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: _UpperCAmelCase , _UpperCAmelCase : Dict = jnp.split(lowerCAmelCase__ , sample.shape[1] , axis=1 ) else: _UpperCAmelCase : Optional[int] = None # 1. compute alphas, betas _UpperCAmelCase : Optional[Any] = state.common.alphas_cumprod[t] _UpperCAmelCase : Union[str, Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) _UpperCAmelCase : str = 1 - alpha_prod_t _UpperCAmelCase : Tuple = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _UpperCAmelCase : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _UpperCAmelCase : Optional[Any] = model_output elif self.config.prediction_type == "v_prediction": _UpperCAmelCase : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` " """ for the FlaxDDPMScheduler.""" ) # 3. Clip "predicted x_0" if self.config.clip_sample: _UpperCAmelCase : List[Any] = jnp.clip(lowerCAmelCase__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _UpperCAmelCase : Any = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t _UpperCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _UpperCAmelCase : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): _UpperCAmelCase : List[Any] = jax.random.split(lowerCAmelCase__ , num=1 ) _UpperCAmelCase : Any = jax.random.normal(lowerCAmelCase__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(lowerCAmelCase__ , lowerCAmelCase__ , predicted_variance=lowerCAmelCase__ ) ** 0.5) * noise _UpperCAmelCase : Any = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) _UpperCAmelCase : int = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase__ , state=lowerCAmelCase__ ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): return add_noise_common(state.common , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): return get_velocity_common(state.common , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __len__(self ): return self.config.num_train_timesteps
156
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__a ): snake_case : str = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : List[Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Union[str, Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Optional[int] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Optional[Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Any = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Tuple = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : List[Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Optional[Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : str = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : List[Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Dict = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Optional[int] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Optional[Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : str = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Optional[int] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Tuple = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : List[str] = 
["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Optional[Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Dict = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Tuple = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : int = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Union[str, Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Optional[int] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : str = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Any = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Union[str, Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : List[Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Optional[int] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : List[Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] ) class __lowerCAmelCase ( metaclass=__a ): snake_case : Union[str, Any] = ["""sentencepiece"""] def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ): requires_backends(self , ["""sentencepiece"""] )
156
1
from math import ceil


def solution(n: int = 1001) -> int:
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number')
43
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
561
0
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class UpperCamelCase_ : '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : Union[str, Any]=[1, 1, 2] , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu_new" , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Dict=False , ) ->Optional[int]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = block_sizes A__ = num_decoder_layers A__ = d_model A__ = n_head A__ = d_head A__ = d_inner A__ = hidden_act A__ = hidden_dropout A__ = attention_dropout A__ = activation_dropout A__ = max_position_embeddings A__ = type_vocab_size A__ = 2 A__ = num_labels A__ = num_choices A__ = scope A__ = initializer_std # Used in the tests to check the size of the first attention layer A__ = n_head # Used in the tests to check the size of the first hidden state A__ = self.d_model # Used in the tests to check the number of output hidden states/attentions A__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: A__ = self.num_hidden_layers + 2 def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , ) ->Optional[Any]: '''simple docstring''' A__ = TFFunnelModel(config=UpperCamelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCamelCase__) A__ = [input_ids, input_mask] A__ = model(UpperCamelCase__) A__ = model(UpperCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) A__ = False A__ = TFFunnelModel(config=UpperCamelCase__) A__ = model(UpperCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) A__ = False A__ = TFFunnelModel(config=UpperCamelCase__) A__ = model(UpperCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , ) ->Dict: '''simple docstring''' A__ = TFFunnelBaseModel(config=UpperCamelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCamelCase__) A__ = [input_ids, input_mask] A__ = model(UpperCamelCase__) A__ = model(UpperCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) A__ = False A__ = TFFunnelBaseModel(config=UpperCamelCase__) A__ = model(UpperCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model)) A__ = False A__ = TFFunnelBaseModel(config=UpperCamelCase__) A__ = model(UpperCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , 
UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' A__ = TFFunnelForPreTraining(config=UpperCamelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , ) ->Optional[Any]: '''simple docstring''' A__ = TFFunnelForMaskedLM(config=UpperCamelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , ) ->List[str]: '''simple docstring''' A__ = self.num_labels A__ = TFFunnelForSequenceClassification(config=UpperCamelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , ) ->Optional[int]: '''simple docstring''' A__ = self.num_choices A__ = TFFunnelForMultipleChoice(config=UpperCamelCase__) A__ = tf.tile(tf.expand_dims(UpperCamelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCamelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCamelCase__ , 1) , (1, self.num_choices, 1)) A__ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } A__ = model(UpperCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , ) ->Optional[int]: '''simple docstring''' A__ = self.num_labels A__ = TFFunnelForTokenClassification(config=UpperCamelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , ) ->str: '''simple docstring''' A__ = TFFunnelForQuestionAnswering(config=UpperCamelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCamelCase__) self.parent.assertEqual(result.start_logits.shape , 
(self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel), '''fill-mask''': TFFunnelForMaskedLM, '''question-answering''': TFFunnelForQuestionAnswering, '''text-classification''': TFFunnelForSequenceClassification, '''token-classification''': TFFunnelForTokenClassification, '''zero-shot''': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' A__ = TFFunnelModelTester(self) A__ = ConfigTester(self , config_class=UpperCamelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__) @require_tf class UpperCamelCase_ ( _A , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple: '''simple docstring''' A__ = TFFunnelModelTester(self , base=UpperCamelCase__) A__ = ConfigTester(self , config_class=UpperCamelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Any: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Dict: '''simple docstring''' A__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__)
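The tester above encodes Funnel's hidden-state bookkeeping; a worked check of the count under the tester's defaults (block_sizes=[1, 1, 2], num_decoder_layers=1), following the comment in the tester itself.

# Hidden-state count the Funnel tester above asserts on.
block_sizes = [1, 1, 2]
num_decoder_layers = 1

base_model_layers = sum(block_sizes)                           # encoder only: 4
full_model_layers = sum(block_sizes) + num_decoder_layers + 2  # +2 for input
# embeddings plus the upsampled decoder input, per the tester's comment
print(base_model_layers, full_model_layers)  # 4 7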
710
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Union[str, Any]=30 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : List[Any]=None , ) ->Dict: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A__ = (image_size // patch_size) ** 2 A__ = num_patches + 1 def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]: '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str) ->Tuple: '''simple docstring''' A__ = TFViTModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) # Test with an image with different size than the one specified in config. 
A__ = self.image_size // 2 A__ = pixel_values[:, :, :image_size, :image_size] A__ = model(UpperCAmelCase__ , interpolate_pos_encoding=UpperCAmelCase__ , training=UpperCAmelCase__) A__ = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str]) ->List[str]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFViTForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # Test with an image with different size than the one specified in config. A__ = self.image_size // 2 A__ = pixel_values[:, :, :image_size, :image_size] A__ = model(UpperCAmelCase__ , interpolate_pos_encoding=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFViTForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = TFViTModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : str) ->List[str]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple: '''simple docstring''' pass @unittest.skip(reason='''ViT does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer)) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase__ , tf.keras.layers.Layer)) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def 
SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]: '''simple docstring''' A__ = TFViTModel.from_pretrained('''google/vit-base-patch16-224''') self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self : Any) ->int: '''simple docstring''' A__ = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.2744, 0.8215, -0.0836]) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)
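The ViT tests above hinge on simple patch arithmetic: an image of side H with P-pixel patches yields (H // P) squared tokens plus one [CLS] token, and the half-size interpolation case shrinks the sequence accordingly.

# Sequence-length arithmetic used by the ViT tests above
# (image_size=30, patch_size=2 are the tester defaults).
image_size, patch_size = 30, 2
seq_length = (image_size // patch_size) ** 2 + 1   # 226, incl. [CLS]
half = image_size // 2
interpolated = (half // patch_size) ** 2 + 1       # 50
print(seq_length, interpolated)  # 226 50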
177
0
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __a : List[str] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class __UpperCAmelCase ( snake_case__ , unittest.TestCase ): """simple docstring""" lowercase = GPTSwaTokenizer lowercase = False lowercase = True lowercase = False def __lowerCAmelCase ( self ) -> int: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase = GPTSwaTokenizer(SCREAMING_SNAKE_CASE , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" UpperCamelCase = "This is a test" UpperCamelCase = "This is a test" return input_text, output_text def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = "<s>" UpperCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2000 ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = GPTSwaTokenizer(SCREAMING_SNAKE_CASE ) UpperCamelCase = tokenizer.tokenize("This is a test" ) self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [465, 287, 265, 631, 842] ) UpperCamelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) # fmt: off self.assertListEqual( SCREAMING_SNAKE_CASE , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on UpperCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) self.assertListEqual( SCREAMING_SNAKE_CASE , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) UpperCamelCase = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ) # fmt: off self.assertListEqual( SCREAMING_SNAKE_CASE , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def __lowerCAmelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = GPTSwaTokenizer(SCREAMING_SNAKE_CASE ) UpperCamelCase = ["This is a test", "I was born in 92000, and this is falsé."] UpperCamelCase = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assertListEqual(tokenizer.encode_fast(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) # Test that decode_fast returns the input text for text, token_ids in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): self.assertEqual(tokenizer.decode_fast(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" UpperCamelCase = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. Cool", ] # fmt: off UpperCamelCase = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE , model_name="AI-Sweden/gpt-sw3-126m" , sequences=SCREAMING_SNAKE_CASE , )
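The byte-fallback pieces asserted above (<0x39>, <0xC3>, <0xA9>) are simply the UTF-8 bytes of characters missing from the SentencePiece vocabulary; a standalone sketch of that encoding, independent of the tokenizer:

# Deriving the byte-fallback tokens seen in the test above.
for ch in ("9", "é"):
    print(ch, [f"<0x{b:02X}>" for b in ch.encode("utf-8")])
# 9 ['<0x39>']
# é ['<0xC3>', '<0xA9>']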
606
from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging __a : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class __UpperCAmelCase ( snake_case__ ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: UpperCamelCase = ( f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`''' f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure ''' "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE ) UpperCamelCase = dict(scheduler.config ) UpperCamelCase = 1 UpperCamelCase = FrozenDict(SCREAMING_SNAKE_CASE ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: UpperCamelCase = ( f'''The configuration file of this scheduler: {scheduler} has not set the configuration''' " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE ) UpperCamelCase = dict(scheduler.config ) UpperCamelCase = True UpperCamelCase = FrozenDict(SCREAMING_SNAKE_CASE ) if safety_checker is None: logger.warning( f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE = "auto" ) -> Optional[Any]: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCamelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" self.enable_attention_slicing(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCamelCase = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 512 , SCREAMING_SNAKE_CASE = 512 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = 7.5 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , **SCREAMING_SNAKE_CASE , ) -> str: """simple docstring""" UpperCamelCase = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) UpperCamelCase = self.segmentation_model(**SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() UpperCamelCase = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size ) # Run inpainting pipeline with the generated mask UpperCamelCase = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , 
callback_steps=SCREAMING_SNAKE_CASE , )
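An illustrative invocation of the pipeline above, as a minimal sketch only: the snippet's identifiers are mangled, so the variable names, keyword names, and prompt strings below are all assumptions. CLIPSeg segments the region named by `text` into a mask, and the wrapped inpainting pipeline fills that region according to `prompt`.

# Hedged usage sketch; `pipe` is an instance of the pipeline class above and
# `init_image` a PIL image (both assumed, as are the keyword names below).
result = pipe(
    prompt="a pristine white sofa",   # what to paint into the masked region
    image=init_image,                 # the scene to edit
    text="the old armchair",          # what CLIPSeg should segment out
    num_inference_steps=50,
    guidance_scale=7.5,
)
result.images[0].save("inpainted.png")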
606
1
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in parallel.

    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in series.

    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    for index, resistor in enumerate(resistors):
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        sum_r += resistor
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
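A worked check of both helpers: 4 Ω and 6 Ω give 1 / (1/4 + 1/6) = 2.4 Ω in parallel and 10 Ω in series.

resistors = [4.0, 6.0]
print(resistor_parallel(resistors))  # 2.4
print(resistor_series(resistors))    # 10.0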
707
'''simple docstring''' import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() lowerCAmelCase__ = 2 class _A : '''simple docstring''' def __init__( self : List[Any] , *, # begin keyword-only arguments lowerCamelCase : Optional[int]="<s>" , lowerCamelCase : str="<pad>" , lowerCamelCase : str="</s>" , lowerCamelCase : int="<unk>" , lowerCamelCase : Tuple=None , )-> str: snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = bos, unk, pad, eos snake_case__ : Dict = [] snake_case__ : int = [] snake_case__ : Optional[int] = {} snake_case__ : int = self.add_symbol(lowerCamelCase ) snake_case__ : Optional[int] = self.add_symbol(lowerCamelCase ) snake_case__ : List[str] = self.add_symbol(lowerCamelCase ) snake_case__ : int = self.add_symbol(lowerCamelCase ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(lowerCamelCase ) snake_case__ : int = len(self.symbols ) def __eq__( self : str , lowerCamelCase : Tuple )-> Optional[Any]: return self.indices == other.indices def __getitem__( self : Optional[int] , lowerCamelCase : Any )-> Tuple: if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : Any )-> Union[str, Any]: return len(self.symbols ) def __contains__( self : Tuple , lowerCamelCase : int )-> int: return sym in self.indices @classmethod def __lowerCAmelCase ( cls : Dict , lowerCamelCase : Union[str, Any] )-> str: snake_case__ : List[str] = cls() d.add_from_file(lowerCamelCase ) return d def __lowerCAmelCase ( self : int , lowerCamelCase : int , lowerCamelCase : List[Any]=1 , lowerCamelCase : Union[str, Any]=False )-> Any: if word in self.indices and not overwrite: snake_case__ : Union[str, Any] = self.indices[word] snake_case__ : str = self.count[idx] + n return idx else: snake_case__ : Any = len(self.symbols ) snake_case__ : Optional[int] = idx self.symbols.append(lowerCamelCase ) self.count.append(lowerCamelCase ) return idx def __lowerCAmelCase ( self : Any , lowerCamelCase : List[Any] )-> Dict: return 0 def __lowerCAmelCase ( self : int , lowerCamelCase : str )-> Optional[int]: if isinstance(lowerCamelCase , lowerCamelCase ): try: with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(lowerCamelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(lowerCamelCase ) ) return snake_case__ : Union[str, Any] = f.readlines() snake_case__ : Optional[Any] = self._load_meta(lowerCamelCase ) for line in lines[indices_start_line:]: try: snake_case__ , snake_case__ : Optional[int] = line.rstrip().rsplit(""" """ , 1 ) if field == "#fairseq:overwrite": snake_case__ : str = True snake_case__ , snake_case__ : Any = line.rsplit(""" """ , 1 ) else: snake_case__ : Dict = False snake_case__ : Optional[int] = int(lowerCamelCase ) snake_case__ : List[str] = line if word in self and not overwrite: raise RuntimeError( """Duplicate word found when loading Dictionary: '{}'. """ """Duplicate words can overwrite earlier ones by adding the """ """#fairseq:overwrite flag at the end of the corresponding row """ """in the dictionary file. 
If using the Camembert model, please """ """download an updated copy of the model file.""".format(lowerCamelCase ) ) self.add_symbol(lowerCamelCase , n=lowerCamelCase , overwrite=lowerCamelCase ) except ValueError: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" ) def lowerCAmelCase__ ( UpperCAmelCase ): """simple docstring""" snake_case__ : List[str] = dict((re.sub(R"""@@$""" , """""" , UpperCAmelCase ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , UpperCAmelCase ), v) for k, v in d.items() ) snake_case__ : str = """<s> <pad> </s> <unk>""".split() # restore the special tokens for k in keep_keys: del da[f"""{k}</w>"""] snake_case__ : Optional[Any] = d[k] # restore return da def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" if not os.path.exists(UpperCAmelCase ): raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" ) os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase ) print(f"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models snake_case__ : Tuple = os.path.join(UpperCAmelCase , """checkpoint.pt""" ) if not os.path.isfile(UpperCAmelCase ): raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" ) snake_case__ : str = torch.load(UpperCAmelCase , map_location="""cpu""" ) snake_case__ : List[Any] = chkpt["""cfg"""]["""model"""] # dicts snake_case__ : Optional[Any] = os.path.join(UpperCAmelCase , """dict.txt""" ) if not os.path.isfile(UpperCAmelCase ): raise ValueError(f"""path to the file {dict_file} does not exist!""" ) snake_case__ : List[str] = Dictionary.load(UpperCAmelCase ) snake_case__ : Optional[int] = rewrite_dict_keys(src_dict.indices ) snake_case__ : Tuple = len(UpperCAmelCase ) snake_case__ : Optional[Any] = os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES["""vocab_file"""] ) print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" ) with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) ) # merges_file (bpecodes) snake_case__ : Union[str, Any] = os.path.join(UpperCAmelCase , """bpecodes""" ) if not os.path.isfile(UpperCAmelCase ): raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" ) snake_case__ : Tuple = os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES["""merges_file"""] ) shutil.copyfile(UpperCAmelCase , UpperCAmelCase ) # model config snake_case__ : str = os.path.join(UpperCAmelCase , """config.json""" ) snake_case__ : Dict = { """activation_dropout""": args["""activation_dropout"""], """architectures""": ["""BioGptForCausalLM"""], """attention_probs_dropout_prob""": args["""attention_dropout"""], """bos_token_id""": 0, """eos_token_id""": 2, """hidden_act""": args["""activation_fn"""], """hidden_dropout_prob""": args["""dropout"""], """hidden_size""": args["""decoder_embed_dim"""], """initializer_range""": 0.0_2, """intermediate_size""": args["""decoder_ffn_embed_dim"""], """layer_norm_eps""": 1E-1_2, """layerdrop""": args["""decoder_layerdrop"""], """max_position_embeddings""": args["""max_target_positions"""], """model_type""": """biogpt""", """num_attention_heads""": args["""decoder_attention_heads"""], """num_hidden_layers""": args["""decoder_layers"""], """pad_token_id""": 1, """scale_embedding""": not args["""no_scale_embedding"""], """tie_word_embeddings""": args["""share_decoder_input_output_embed"""], """vocab_size""": src_vocab_size, } # good hparam defaults to start with 
print(f"""Generating {biogpt_model_config_file}""" ) with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) ) # tokenizer config snake_case__ : int = os.path.join(UpperCAmelCase , UpperCAmelCase ) snake_case__ : List[str] = { """bos_token""": """<s>""", """eos_token""": """</s>""", """model_max_length""": 1024, """pad_token""": """<pad>""", """special_tokens_map_file""": None, """tokenizer_class""": """BioGptTokenizer""", """unk_token""": """<unk>""", } print(f"""Generating {biogpt_tokenizer_config_file}""" ) with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) ) # model snake_case__ : int = chkpt["""model"""] # remove unneeded keys snake_case__ : List[Any] = [ """decoder.version""", ] for k in ignore_keys: model_state_dict.pop(UpperCAmelCase , UpperCAmelCase ) snake_case__ : List[Any] = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("""output_projection.weight""" ): snake_case__ : str = model_state_dict.pop(UpperCAmelCase ) else: snake_case__ : Optional[int] = model_state_dict.pop(UpperCAmelCase ) snake_case__ : Tuple = BioGptConfig.from_pretrained(UpperCAmelCase ) snake_case__ : Optional[int] = BioGptForCausalLM(UpperCAmelCase ) # check that it loads ok model_new.load_state_dict(UpperCAmelCase ) # save snake_case__ : Dict = os.path.join(UpperCAmelCase , UpperCAmelCase ) print(f"""Generating {pytorch_weights_dump_path}""" ) torch.save(UpperCAmelCase , UpperCAmelCase ) print("""Conversion is done!""" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowerCAmelCase__ = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
172
0
"""simple docstring""" import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = (DDIMParallelScheduler,) _lowerCamelCase = (('''eta''', 0.0), ('''num_inference_steps''', 50)) def UpperCAmelCase__ ( self , **_lowercase ) -> int: '''simple docstring''' snake_case_ : Optional[int] = { """num_train_timesteps""": 1_0_0_0, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """clip_sample""": True, } config.update(**_lowercase ) return config def UpperCAmelCase__ ( self , **_lowercase ) -> int: '''simple docstring''' snake_case_ : Union[str, Any] = self.scheduler_classes[0] snake_case_ : Dict = self.get_scheduler_config(**_lowercase ) snake_case_ : Optional[Any] = scheduler_class(**_lowercase ) snake_case_ , snake_case_ : int = 1_0, 0.0 snake_case_ : Any = self.dummy_model() snake_case_ : List[str] = self.dummy_sample_deter scheduler.set_timesteps(_lowercase ) for t in scheduler.timesteps: snake_case_ : Dict = model(_lowercase , _lowercase ) snake_case_ : Any = scheduler.step(_lowercase , _lowercase , _lowercase , _lowercase ).prev_sample return sample def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' for timesteps in [1_0_0, 5_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_lowercase ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_lowercase ) snake_case_ : str = self.scheduler_classes[0] snake_case_ : Optional[int] = self.get_scheduler_config(steps_offset=1 ) snake_case_ : Any = scheduler_class(**_lowercase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowercase ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowercase ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowercase ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=_lowercase ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=_lowercase ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' self.check_over_configs(thresholding=_lowercase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' for t in [1, 1_0, 4_9]: self.check_over_forward(time_step=_lowercase ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ): self.check_over_forward(time_step=_lowercase , 
num_inference_steps=_lowercase ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=_lowercase , eta=_lowercase ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ : List[Any] = self.scheduler_classes[0] snake_case_ : List[str] = self.get_scheduler_config() snake_case_ : str = scheduler_class(**_lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.1_4771 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.3_2460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1E-5 def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ : List[Any] = self.scheduler_classes[0] snake_case_ : str = self.get_scheduler_config() snake_case_ : Any = scheduler_class(**_lowercase ) snake_case_ , snake_case_ : List[str] = 1_0, 0.0 scheduler.set_timesteps(_lowercase ) snake_case_ : Tuple = self.dummy_model() snake_case_ : List[str] = self.dummy_sample_deter snake_case_ : List[str] = self.dummy_sample_deter + 0.1 snake_case_ : List[Any] = self.dummy_sample_deter - 0.1 snake_case_ : Tuple = samplea.shape[0] snake_case_ : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 ) snake_case_ : int = torch.arange(_lowercase )[0:3, None].repeat(1 , _lowercase ) snake_case_ : Any = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) snake_case_ : Optional[Any] = scheduler.batch_step_no_noise(_lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowercase ) snake_case_ : Dict = torch.sum(torch.abs(_lowercase ) ) snake_case_ : List[str] = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 1147.7904 ) < 1E-2 assert abs(result_mean.item() - 0.4982 ) < 1E-3 def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' snake_case_ : List[Any] = self.full_loop() snake_case_ : Tuple = torch.sum(torch.abs(_lowercase ) ) snake_case_ : Optional[Any] = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 172.0067 ) < 1E-2 assert abs(result_mean.item() - 0.22_3967 ) < 1E-3 def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ : List[Any] = self.full_loop(prediction_type="""v_prediction""" ) snake_case_ : Tuple = torch.sum(torch.abs(_lowercase ) ) snake_case_ : Dict = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 52.5302 ) < 1E-2 assert abs(result_mean.item() - 0.0684 ) < 1E-3 def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01 ) snake_case_ : Any = torch.sum(torch.abs(_lowercase ) ) snake_case_ : List[str] = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 149.8295 ) < 1E-2 assert abs(result_mean.item() - 0.1951 ) < 1E-3 def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Dict = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01 ) snake_case_ : Optional[Any] = torch.sum(torch.abs(_lowercase ) ) snake_case_ : Dict = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 149.0784 ) < 1E-2 assert abs(result_mean.item() - 0.1941 ) < 1E-3
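The `steps_offset` assertion above follows from "leading" timestep spacing; a sketch of the arithmetic (the spacing rule is an assumption drawn from the scheduler's default behaviour).

# Arithmetic behind the asserted timesteps [801, 601, 401, 201, 1].
num_train, num_inference, offset = 1000, 5, 1
step = num_train // num_inference                      # 200
print([i * step + offset for i in reversed(range(num_inference))])
# [801, 601, 401, 201, 1]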
58
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class _lowercase ( unittest.TestCase ): def __init__( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : int = 3_2 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 2_5_5 , lowerCamelCase__ : bool = True , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] , lowerCamelCase__ : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] , lowerCamelCase__ : bool = True , lowerCamelCase__ : List[str]=7 , lowerCamelCase__ : int=3_0 , lowerCamelCase__ : List[Any]=4_0_0 , lowerCamelCase__ : int=3 , ) -> Optional[Any]: """simple docstring""" A_ = parent A_ = do_resize A_ = size if size is not None else {'''shortest_edge''': 2_8_8} A_ = size_divisor A_ = do_rescale A_ = rescale_factor A_ = do_normalize A_ = do_center_crop A_ = image_mean A_ = image_std A_ = do_pad A_ = batch_size A_ = num_channels A_ = min_resolution A_ = max_resolution def UpperCamelCase ( self : str ) -> int: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCamelCase ( self : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int=False ) -> Dict: """simple docstring""" if not batched: A_ = self.size['''shortest_edge'''] A_ = image_inputs[0] if isinstance(lowerCamelCase__ , Image.Image ): A_ ,A_ = image.size else: A_ ,A_ = image.shape[1], image.shape[2] A_ = size / min(lowerCamelCase__ , lowerCamelCase__ ) if h < w: A_ ,A_ = size, scale * w else: A_ ,A_ = scale * h, size A_ = int((1_3_3_3 / 8_0_0) * size ) if max(lowerCamelCase__ , lowerCamelCase__ ) > max_size: A_ = max_size / max(lowerCamelCase__ , lowerCamelCase__ ) A_ = newh * scale A_ = neww * scale A_ ,A_ = int(newh + 0.5 ), int(neww + 0.5 ) A_ ,A_ = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: A_ = [] for image in image_inputs: A_ ,A_ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ = max(lowerCamelCase__ , key=lambda lowerCamelCase__ : item[0] )[0] A_ = max(lowerCamelCase__ , key=lambda lowerCamelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowercase ( __lowerCamelCase,unittest.TestCase ): _lowercase : List[str] = BridgeTowerImageProcessor if is_vision_available() else None def UpperCamelCase ( self : int ) -> Tuple: """simple docstring""" A_ = BridgeTowerImageProcessingTester(self ) @property def UpperCamelCase ( self : str ) -> Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self : Tuple ) -> List[Any]: """simple docstring""" A_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 
'''image_std''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''size''' ) ) self.assertTrue(hasattr(lowerCamelCase__ , '''size_divisor''' ) ) def UpperCamelCase ( self : Optional[int] ) -> str: """simple docstring""" pass def UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" A_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values A_ ,A_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values A_ ,A_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self : str ) -> Optional[int]: """simple docstring""" A_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values A_ ,A_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values A_ ,A_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self : Union[str, Any] ) -> int: """simple docstring""" A_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values A_ ,A_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values A_ ,A_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
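`get_expected_values` in the tester above mirrors the processor's resize rule; a worked example for a 400 x 600 image with shortest_edge=288 and size_divisor=32:

# Worked example of the expected-size computation in the tester above.
size, divisor = 288, 32
h, w = 400, 600
scale = size / min(h, w)                                        # 0.72
newh, neww = (size, scale * w) if h < w else (scale * h, size)  # 288, 432.0
max_size = int((1333 / 800) * size)                             # 479; 432 <= 479, no rescale
newh = int(newh + 0.5) // divisor * divisor
neww = int(neww + 0.5) // divisor * divisor
print(newh, neww)  # 288 416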
203
0
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self) -> KarrasVeSchedulerState:
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
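For intuition, the noise schedule built in set_timesteps interpolates geometrically between the two sigma endpoints. The following is a minimal standalone sketch of the same formula (plain NumPy, assuming the default sigma_min=0.02 and sigma_max=100; not part of the original file):

import numpy as np

# Same arithmetic as set_timesteps above; the list endpoints work out to
# sigma_min**2 and sigma_max**2 under the defaults.
sigma_min, sigma_max, num_inference_steps = 0.02, 100.0, 50
timesteps = np.arange(0, num_inference_steps)[::-1]
schedule = [
    sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
    for i in timesteps
]
print(schedule[0], schedule[-1])  # 0.0004 and 10000.0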
707
"""simple docstring""" # Algorithm for the pigeonhole sorting def UpperCAmelCase__ ( A__ ) -> Dict: """simple docstring""" lowerCamelCase__ = min(A__ ) # min() finds the minimum value lowerCamelCase__ = max(A__ ) # max() finds the maximum value lowerCamelCase__ = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size lowerCamelCase__ = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(A__ , A__ ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. lowerCamelCase__ = 0 for count in range(A__ ): while holes[count] > 0: holes[count] -= 1 lowerCamelCase__ = count + min_val i += 1 def UpperCAmelCase__ ( ) -> Optional[int]: """simple docstring""" lowerCamelCase__ = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(A__ ) print("Sorted order is:" , " ".join(A__ ) ) if __name__ == "__main__": main()
274
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _a: List[Any] = logging.get_logger(__name__) class __UpperCamelCase ( lowercase ): SCREAMING_SNAKE_CASE__ = ['pixel_values'] def __init__( self : Optional[int] , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PIL.Image.BICUBIC , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , **lowerCAmelCase : Tuple , ): '''simple docstring''' super().__init__(**lowerCAmelCase ) UpperCAmelCase_ = size if size is not None else {"height": 256, "width": 256} UpperCAmelCase_ = get_size_dict(lowerCAmelCase ) UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224} UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" ) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = resample UpperCAmelCase_ = do_center_crop UpperCAmelCase_ = crop_size UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def __A ( self : List[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PIL.Image.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any] , ): '''simple docstring''' UpperCAmelCase_ = get_size_dict(lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" ) return resize( lowerCAmelCase , size=(size["height"], size["width"]) , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def __A ( self : Optional[int] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Tuple , ): '''simple docstring''' UpperCAmelCase_ = get_size_dict(lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}" ) return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase ) def __A ( self : Dict , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ): '''simple docstring''' return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def __A ( self : Union[str, Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ): '''simple docstring''' return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def __A ( self : Union[str, Any] , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : Tuple=None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase : Dict , ): '''simple docstring''' UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(lowerCAmelCase ) UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" ) UpperCAmelCase_ = make_list_of_images(lowerCAmelCase ) if not valid_images(lowerCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
UpperCAmelCase_ = [to_numpy_array(lowerCAmelCase ) for image in images] if do_resize: UpperCAmelCase_ = [self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images] if do_center_crop: UpperCAmelCase_ = [self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images] UpperCAmelCase_ = {"pixel_values": images} return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
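As a usage illustration of the preprocess flow above (the class name below is hypothetical, since the dump obscures it; the call pattern and the default 256 resize followed by a 224 center crop come from the definitions shown):

import numpy as np
from PIL import Image

# Hypothetical name standing in for the image processor class defined above.
processor = MyImageProcessor()
image = Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])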
162
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( lowercase , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = GPTaTokenizer SCREAMING_SNAKE_CASE__ = GPTaTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {'add_prefix_space': True} SCREAMING_SNAKE_CASE__ = False def __A ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] UpperCAmelCase_ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) UpperCAmelCase_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCAmelCase_ = {"unk_token": "<unk>"} UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase ) ) def __A ( self : Optional[int] , **lowerCAmelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def __A ( self : Union[str, Any] , **lowerCAmelCase : Tuple ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def __A ( self : Optional[Any] , lowerCAmelCase : int ): '''simple docstring''' UpperCAmelCase_ = "lower newer" UpperCAmelCase_ = "lower newer" return input_text, output_text def __A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase_ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ = "lower newer" UpperCAmelCase_ = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase , add_prefix_space=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) UpperCAmelCase_ = tokens + [tokenizer.unk_token] UpperCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase ) def __A ( self : str ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase ) UpperCAmelCase_ = "lower newer" # Testing tokenization UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase , add_prefix_space=lowerCAmelCase ) UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) # Testing conversion to ids without special tokens UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase ) UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) # Testing conversion to ids with special tokens UpperCAmelCase_ = 
self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase ) UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_prefix_space=lowerCAmelCase ) UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) # Testing the unknown token UpperCAmelCase_ = tokens + [rust_tokenizer.unk_token] UpperCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase ) def __A ( self : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : int ): '''simple docstring''' pass def __A ( self : str , lowerCAmelCase : List[str]=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase ) # Simple input UpperCAmelCase_ = "This is a simple input" UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"] UpperCAmelCase_ = ("This is a simple input", "This is a pair") UpperCAmelCase_ = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" ) # Simple input self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" ) # Simple input self.assertRaises( lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" , ) # Pair input self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" ) # Pair input self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" ) # Pair input self.assertRaises( lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" , ) def __A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input UpperCAmelCase_ = "This is a simple input" UpperCAmelCase_ = ["This is a simple input looooooooong", "This is a simple input"] UpperCAmelCase_ = ("This is a simple input", "This is a pair") UpperCAmelCase_ = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] UpperCAmelCase_ = tokenizer.pad_token_id UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding="max_length" , max_length=30 , return_tensors="np" ) UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , truncate=lowerCAmelCase , return_tensors="np" ) UpperCAmelCase_ = tokenizer(*lowerCAmelCase , padding="max_length" , max_length=60 , return_tensors="np" ) UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , truncate=lowerCAmelCase , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have 
padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __A ( self : Dict ): '''simple docstring''' UpperCAmelCase_ = "$$$" UpperCAmelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase , add_bos_token=lowerCAmelCase ) UpperCAmelCase_ = "This is a simple input" UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"] UpperCAmelCase_ = tokenizer.bos_token_id UpperCAmelCase_ = tokenizer(lowerCAmelCase ) UpperCAmelCase_ = tokenizer(lowerCAmelCase ) self.assertEqual(out_s.input_ids[0] , lowerCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) UpperCAmelCase_ = tokenizer.decode(out_s.input_ids ) UpperCAmelCase_ = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , lowerCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __A ( self : int ): '''simple docstring''' pass def __A ( self : Dict ): '''simple docstring''' UpperCAmelCase_ = [self.get_tokenizer(do_lower_case=lowerCAmelCase , add_bos_token=lowerCAmelCase )] for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): UpperCAmelCase_ = "Encode this." UpperCAmelCase_ = "This one too please." 
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) encoded_sequence += tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) UpperCAmelCase_ = tokenizer.encode_plus( lowerCAmelCase , lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_special_tokens_mask=lowerCAmelCase , ) UpperCAmelCase_ = encoded_sequence_dict["input_ids"] UpperCAmelCase_ = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) ) UpperCAmelCase_ = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase ) ] UpperCAmelCase_ = [x for x in filtered_sequence if x is not None] self.assertEqual(lowerCAmelCase , lowerCAmelCase ) @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): def __A ( self : int ): '''simple docstring''' UpperCAmelCase_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase ) UpperCAmelCase_ = "A photo of a cat" UpperCAmelCase_ = tokenizer.encode( lowerCAmelCase , ) self.assertEqual(lowerCAmelCase , [2, 250, 1_345, 9, 10, 4_758] ) tokenizer.save_pretrained("test_opt" ) UpperCAmelCase_ = AutoTokenizer.from_pretrained("./test_opt" ) UpperCAmelCase_ = tokenizer.encode( lowerCAmelCase , ) self.assertEqual(lowerCAmelCase , [2, 250, 1_345, 9, 10, 4_758] ) def __A ( self : int ): '''simple docstring''' UpperCAmelCase_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=lowerCAmelCase ) UpperCAmelCase_ = "A photo of a cat" UpperCAmelCase_ = tokenizer.encode( lowerCAmelCase , ) # Same as above self.assertEqual(lowerCAmelCase , [2, 250, 1_345, 9, 10, 4_758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def __A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase ) UpperCAmelCase_ = "bos" UpperCAmelCase_ = tokenizer.get_vocab()["bos"] UpperCAmelCase_ = "A photo of a cat" UpperCAmelCase_ = tokenizer.encode( lowerCAmelCase , ) # We changed the bos token self.assertEqual(lowerCAmelCase , [31_957, 250, 1_345, 9, 10, 4_758] ) tokenizer.save_pretrained("./tok" ) UpperCAmelCase_ = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) UpperCAmelCase_ = tokenizer.encode( lowerCAmelCase , ) self.assertEqual(lowerCAmelCase , [31_957, 250, 1_345, 9, 10, 4_758] )
162
1
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
705
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) a__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name a__ : List[str] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str]=8 ) -> str: """simple docstring""" UpperCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Tuple , a__ : UNetaDConditionModel , a__ : DDPMScheduler , a__ : VQModel , ): super().__init__() self.register_modules( unet=a__ , scheduler=a__ , movq=a__ , ) UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __snake_case ( self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : int , a__ : Optional[Any] , a__ : List[Any] , a__ : Union[str, Any] ): if latents is None: UpperCAmelCase = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) UpperCAmelCase = latents.to(a__ ) UpperCAmelCase = latents * scheduler.init_noise_sigma return latents def __snake_case ( self : Optional[Any] , a__ : Union[str, Any]=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) UpperCAmelCase = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(a__ , a__ ) def __snake_case ( self : Union[str, Any] , a__ : List[str]=0 ): if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) UpperCAmelCase = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=a__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase, UpperCAmelCase = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ ) # We'll offload the last model manually. 
UpperCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __snake_case ( self : List[Any] ): if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(a__ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(a__ ) def __call__( self : Union[str, Any] , a__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , a__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , a__ : torch.FloatTensor , a__ : int = 512 , a__ : int = 512 , a__ : int = 100 , a__ : float = 4.0 , a__ : int = 1 , a__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[str] = "pil" , a__ : bool = True , ): UpperCAmelCase = self._execution_device UpperCAmelCase = guidance_scale > 1.0 if isinstance(a__ , a__ ): UpperCAmelCase = torch.cat(a__ , dim=0 ) if isinstance(a__ , a__ ): UpperCAmelCase = torch.cat(a__ , dim=0 ) if isinstance(a__ , a__ ): UpperCAmelCase = torch.cat(a__ , dim=0 ) UpperCAmelCase = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: UpperCAmelCase = image_embeds.repeat_interleave(a__ , dim=0 ) UpperCAmelCase = negative_image_embeds.repeat_interleave(a__ , dim=0 ) UpperCAmelCase = hint.repeat_interleave(a__ , dim=0 ) UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ ) UpperCAmelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=a__ ) self.scheduler.set_timesteps(a__ , device=a__ ) UpperCAmelCase = self.scheduler.timesteps UpperCAmelCase = self.movq.config.latent_channels UpperCAmelCase, UpperCAmelCase = downscale_height_and_width(a__ , a__ , self.movq_scale_factor ) # create initial latent UpperCAmelCase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , a__ , a__ , a__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(a__ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase = {'''image_embeds''': image_embeds, '''hint''': hint} UpperCAmelCase = self.unet( sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0] if do_classifier_free_guidance: UpperCAmelCase, UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase, UpperCAmelCase = noise_pred.chunk(2 ) UpperCAmelCase, UpperCAmelCase = variance_pred.chunk(2 ) UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase, UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase = self.scheduler.step( a__ , a__ , a__ , generator=a__ , )[0] # post-processing UpperCAmelCase = self.movq.decode(a__ , force_not_quantize=a__ )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if 
output_type in ["np", "pil"]: UpperCAmelCase = image * 0.5 + 0.5 UpperCAmelCase = image.clamp(0 , 1 ) UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase = self.numpy_to_pil(a__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=a__ )
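The latent-size helper near the top of this pipeline is easy to sanity-check in isolation. A standalone restatement of its arithmetic, with illustrative values (not part of the original file):

def downscale_height_and_width(height, width, scale_factor=8):
    # Same arithmetic as the helper defined above the pipeline class.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


print(downscale_height_and_width(768, 768))  # (96, 96): a 96x96 latent decodes back to 768x768
print(downscale_height_and_width(700, 700))  # (88, 88): non-multiples round up to the next step of 8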
570
0
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=2 ,_lowerCAmelCase=24 ,_lowerCAmelCase=16 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=None ,_lowerCAmelCase=2 ,_lowerCAmelCase=2 ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = patch_size lowerCamelCase__ = max_length lowerCamelCase__ = num_mel_bins lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = scope lowerCamelCase__ = frequency_stride lowerCamelCase__ = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) lowerCamelCase__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 lowerCamelCase__ = (self.max_length - self.patch_size) // self.time_stride + 1 lowerCamelCase__ = frequency_out_dimension * time_out_dimension lowerCamelCase__ = num_patches + 2 def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, input_values, labels def UpperCamelCase_ ( self ): return ASTConfig( patch_size=self.patch_size ,max_length=self.max_length ,num_mel_bins=self.num_mel_bins ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,frequency_stride=self.frequency_stride ,time_stride=self.time_stride ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = ASTModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 
self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {"""input_values""": input_values} return config, inputs_dict @require_torch class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _UpperCamelCase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCamelCase_ ( self ): lowerCamelCase__ = ASTModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""AST does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,nn.Linear ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""input_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = ASTModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" ) lowerCamelCase__ , lowerCamelCase__ = torchaudio.load(__lowerCAmelCase ) return audio, sampling_rate @require_torch @require_torchaudio class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ( ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ) if is_torchaudio_available() else None ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = self.default_feature_extractor lowerCamelCase__ = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(_lowerCAmelCase ) lowerCamelCase__ = self.default_feature_extractor lowerCamelCase__ , lowerCamelCase__ = prepare_audio() lowerCamelCase__ = audio.squeeze().numpy() lowerCamelCase__ = feature_extractor(_lowerCAmelCase ,sampling_rate=_lowerCAmelCase 
,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) # verify the logits lowerCamelCase__ = torch.Size((1, 5_27) ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) )
50
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCamelCase : Any = logging.get_logger(__name__) __lowerCamelCase : List[str] = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class UpperCAmelCase ( lowercase_): """simple docstring""" lowerCAmelCase_ = """deformable_detr""" lowerCAmelCase_ = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[Any]=300 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Optional[int]=6 , UpperCamelCase__ : Union[str, Any]=1024 , UpperCamelCase__ : List[Any]=8 , UpperCamelCase__ : List[Any]=6 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Optional[int]=8 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]="relu" , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : List[str]=1.0 , UpperCamelCase__ : int=True , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Tuple="sine" , UpperCamelCase__ : Optional[Any]="resnet50" , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Any=False , UpperCamelCase__ : List[Any]=300 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : str=5 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=0.25 , UpperCamelCase__ : Optional[int]=False , **UpperCamelCase__ : Union[str, Any] , ) -> Tuple: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) _UpperCamelCase =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): _UpperCamelCase =backbone_config.get('''model_type''' ) _UpperCamelCase =CONFIG_MAPPING[backbone_model_type] _UpperCamelCase =config_class.from_dict(UpperCamelCase__ ) _UpperCamelCase =use_timm_backbone _UpperCamelCase =backbone_config _UpperCamelCase =num_channels _UpperCamelCase =num_queries _UpperCamelCase =max_position_embeddings _UpperCamelCase =d_model _UpperCamelCase =encoder_ffn_dim _UpperCamelCase =encoder_layers _UpperCamelCase =encoder_attention_heads _UpperCamelCase =decoder_ffn_dim _UpperCamelCase =decoder_layers _UpperCamelCase =decoder_attention_heads _UpperCamelCase =dropout _UpperCamelCase =attention_dropout _UpperCamelCase =activation_dropout _UpperCamelCase =activation_function _UpperCamelCase =init_std _UpperCamelCase =init_xavier_std _UpperCamelCase =encoder_layerdrop _UpperCamelCase =auxiliary_loss _UpperCamelCase =position_embedding_type _UpperCamelCase =backbone _UpperCamelCase =use_pretrained_backbone _UpperCamelCase =dilation # deformable attributes _UpperCamelCase =num_feature_levels _UpperCamelCase =encoder_n_points _UpperCamelCase =decoder_n_points _UpperCamelCase =two_stage _UpperCamelCase =two_stage_num_proposals _UpperCamelCase =with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher _UpperCamelCase =class_cost _UpperCamelCase =bbox_cost _UpperCamelCase =giou_cost # Loss coefficients _UpperCamelCase =mask_loss_coefficient _UpperCamelCase =dice_loss_coefficient _UpperCamelCase =bbox_loss_coefficient _UpperCamelCase =giou_loss_coefficient _UpperCamelCase =eos_coefficient _UpperCamelCase =focal_alpha _UpperCamelCase =disable_custom_kernels super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ ) @property def UpperCamelCase__ ( self : Tuple ) -> int: return self.encoder_attention_heads @property def UpperCamelCase__ ( self : Tuple ) -> int: return self.d_model def UpperCamelCase__ ( self : Union[str, Any] ) -> List[Any]: _UpperCamelCase =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _UpperCamelCase =self.backbone_config.to_dict() _UpperCamelCase =self.__class__.model_type return output
404
0
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __a : Tuple = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) __a : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): """simple docstring""" __lowercase = '''https://pypi.org/pypi/diffusers/json''' __lowercase = json.loads(request.urlopen(lowercase ).read() )['''releases'''].keys() return sorted(lowercase , key=lambda lowercase : version.Version(lowercase ) ) def UpperCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(lowercase ) os.makedirs(lowercase , exist_ok=lowercase ) __lowercase = Path(lowercase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def UpperCAmelCase ( lowercase ): """simple docstring""" init_hf_modules() __lowercase = Path(lowercase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(lowercase , exist_ok=lowercase ) __lowercase = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def UpperCAmelCase ( lowercase ): """simple docstring""" with open(lowercase , '''r''' , encoding='''utf-8''' ) as f: __lowercase = f.read() # Imports of the form `import .xxx` __lowercase = re.findall('''^\s*import\s+\.(\S+)\s*$''' , lowercase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , lowercase , flags=re.MULTILINE ) # Unique-ify return list(set(lowercase ) ) def UpperCAmelCase ( lowercase ): """simple docstring""" __lowercase = False __lowercase = [module_file] __lowercase = [] # Let's recurse through all relative imports while not no_change: __lowercase = [] for f in files_to_check: new_imports.extend(get_relative_imports(lowercase ) ) __lowercase = Path(lowercase ).parent __lowercase = [str(module_path / m ) for m in new_imports] __lowercase = [f for f in new_import_files if f not in all_relative_imports] __lowercase = [F"{f}.py" for f in new_import_files] __lowercase = len(lowercase ) == 0 all_relative_imports.extend(lowercase ) return all_relative_imports def UpperCAmelCase ( lowercase ): """simple docstring""" with open(lowercase , '''r''' , encoding='''utf-8''' ) as f: __lowercase = f.read() # Imports of the form `import xxx` __lowercase = re.findall('''^\s*import\s+(\S+)\s*$''' , lowercase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , lowercase , flags=re.MULTILINE ) # Only keep the top-level module __lowercase = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __lowercase = list(set(lowercase ) ) __lowercase = [] for imp in imports: try: importlib.import_module(lowercase ) except ImportError: missing_packages.append(lowercase ) if len(lowercase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' F"{', '.join(lowercase )}. 
Run `pip install {' '.join(lowercase )}`" ) return get_relative_imports(lowercase ) def UpperCAmelCase ( lowercase , lowercase ): """simple docstring""" __lowercase = module_path.replace(os.path.sep , '''.''' ) __lowercase = importlib.import_module(lowercase ) if class_name is None: return find_pipeline_class(lowercase ) return getattr(lowercase , lowercase ) def UpperCAmelCase ( lowercase ): """simple docstring""" from ..pipelines import DiffusionPipeline __lowercase = dict(inspect.getmembers(lowercase , inspect.isclass ) ) __lowercase = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , lowercase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:" F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in" F" {loaded_module}." ) __lowercase = cls return pipeline_class def UpperCAmelCase ( lowercase , lowercase , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = False , ): """simple docstring""" __lowercase = str(lowercase ) __lowercase = os.path.join(lowercase , lowercase ) if os.path.isfile(lowercase ): __lowercase = module_file_or_url __lowercase = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __lowercase = get_diffusers_versions() # cut ".dev0" __lowercase = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __lowercase = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(F"Defaulting to latest_version: {revision}." ) elif revision in available_versions: __lowercase = F"v{revision}" elif revision == "main": __lowercase = revision else: raise ValueError( F"`custom_revision`: {revision} does not exist. Please make sure to choose one of" F" {', '.join(available_versions + ['main'] )}." ) # community pipeline on GitHub __lowercase = COMMUNITY_PIPELINES_URL.format(revision=lowercase , pipeline=lowercase ) try: __lowercase = cached_download( lowercase , cache_dir=lowercase , force_download=lowercase , proxies=lowercase , resume_download=lowercase , local_files_only=lowercase , use_auth_token=lowercase , ) __lowercase = '''git''' __lowercase = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." ) raise else: try: # Load from URL or cache if already cached __lowercase = hf_hub_download( lowercase , lowercase , cache_dir=lowercase , force_download=lowercase , proxies=lowercase , resume_download=lowercase , local_files_only=lowercase , use_auth_token=lowercase , ) __lowercase = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." ) raise # Check we have all the requirements in our environment __lowercase = check_imports(lowercase ) # Now we move the module inside our cached dynamic modules. 
__lowercase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(lowercase ) __lowercase = Path(lowercase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(lowercase , submodule_path / module_file ) for module_needed in modules_needed: __lowercase = F"{module_needed}.py" shutil.copy(os.path.join(lowercase , lowercase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(lowercase , lowercase ): __lowercase = use_auth_token elif use_auth_token is True: __lowercase = HfFolder.get_token() else: __lowercase = None __lowercase = model_info(lowercase , revision=lowercase , token=lowercase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. __lowercase = submodule_path / commit_hash __lowercase = full_submodule + os.path.sep + commit_hash create_dynamic_module(lowercase ) if not (submodule_path / module_file).exists(): shutil.copy(lowercase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( lowercase , F"{module_needed}.py" , cache_dir=lowercase , force_download=lowercase , resume_download=lowercase , proxies=lowercase , use_auth_token=lowercase , revision=lowercase , local_files_only=lowercase , ) return os.path.join(lowercase , lowercase ) def UpperCAmelCase ( lowercase , lowercase , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = False , **lowercase , ): """simple docstring""" __lowercase = get_cached_module_file( lowercase , lowercase , cache_dir=lowercase , force_download=lowercase , resume_download=lowercase , proxies=lowercase , use_auth_token=lowercase , revision=lowercase , local_files_only=lowercase , ) return get_class_in_module(lowercase , final_module.replace('''.py''' , '''''' ) )
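The relative-import scan that feeds check_imports above can be exercised on its own. A minimal sketch with made-up module text (the regexes are the ones from get_relative_imports):

import re

sample = """\
import os
import .utils
from .pipeline_utils import DiffusionPipeline
from torch import nn
"""

relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", sample, flags=re.MULTILINE)
relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", sample, flags=re.MULTILINE)
print(sorted(set(relative)))  # ['pipeline_utils', 'utils']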
522
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
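A quick worked instance of the two solvers above (values chosen for illustration): find n with n = 1 (mod 5) and n = 2 (mod 7).

# extended_euclid(5, 7) returns (3, -2), since 3*5 + (-2)*7 == 1,
# so n = 2*3*5 + 1*(-2)*7 = 16, and indeed 16 % 5 == 1 and 16 % 7 == 2.
assert chinese_remainder_theorem(5, 1, 7, 2) == 16
assert chinese_remainder_theorem2(5, 1, 7, 2) == 16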
522
1
"""Compute the apparent power in an AC circuit."""
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
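A worked call with illustrative values (note this implementation multiplies V and I directly, rather than using the conjugate of the current as in the usual complex-power convention S = V I*):

# 100 V at 35 degrees, 5 A at -25 degrees: magnitudes multiply and angles add,
# so |S| = 500 VA at 10 degrees, i.e. 500*(cos 10 + j*sin 10).
s = apparent_power(100, 5, 35, -25)
print(round(s.real, 2), round(s.imag, 2))  # 492.4 86.82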
404
"""Tokenization class for MGP-STR."""
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
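To see the character-level behavior, a throwaway-vocab sketch (runnable only inside the transformers package, since the module uses relative imports; the vocab contents here are made up):

import json
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}, f)

tokenizer = MgpstrTokenizer(vocab_file=f.name)
print(tokenizer._tokenize("abc"))                   # ['a', 'b', 'c']
print(tokenizer.convert_tokens_to_ids(["a", "b"]))  # [2, 3]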
404
1
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
715
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger() @dataclass class lowerCamelCase_: '''simple docstring''' lowercase__ : nn.Module lowercase__ : List[nn.Module] = field(default_factory=A__ ) lowercase__ : list = field(default_factory=A__ ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): _lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(lowerCamelCase__ , nn.Convad ) or isinstance(lowerCamelCase__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowerCamelCase__ ) def __call__( self , lowerCamelCase__ ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowerCamelCase__ ) [x.remove() for x in self.handles] return self @property def snake_case__ ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda lowerCamelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCamelCase_: '''simple docstring''' lowercase__ : nn.Module lowercase__ : nn.Module lowercase__ : int = 0 lowercase__ : List = field(default_factory=A__ ) lowercase__ : List = field(default_factory=A__ ) def __call__( self , lowerCamelCase__ ): _lowerCamelCase = Tracker(self.dest )(lowerCamelCase__ ).parametrized _lowerCamelCase = Tracker(self.src )(lowerCamelCase__ ).parametrized _lowerCamelCase = list(filter(lambda lowerCamelCase__ : type(lowerCamelCase__ ) not in self.src_skip , lowerCamelCase__ ) ) _lowerCamelCase = list(filter(lambda lowerCamelCase__ : type(lowerCamelCase__ ) not in self.dest_skip , lowerCamelCase__ ) ) if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise Exception( F"""Numbers of operations are different. Source module has {len(lowerCamelCase__ )} operations while""" F""" destination module has {len(lowerCamelCase__ )}.""" ) for dest_m, src_m in zip(lowerCamelCase__ , lowerCamelCase__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F"""Transfered from={src_m} to={dest_m}""" ) def lowerCAmelCase_( lowercase_ : str , lowercase_ : ResNetConfig , lowercase_ : Path , lowercase_ : bool = True ) -> Tuple: print(F"""Converting {name}...""" ) with torch.no_grad(): _lowerCamelCase = timm.create_model(lowercase_ , pretrained=lowercase_ ).eval() _lowerCamelCase = ResNetForImageClassification(lowercase_ ).eval() _lowerCamelCase = ModuleTransfer(src=lowercase_ , dest=lowercase_ ) _lowerCamelCase = torch.randn((1, 3, 2_24, 2_24) ) module_transfer(lowercase_ ) assert torch.allclose(from_model(lowercase_ ) , our_model(lowercase_ ).logits ), "The model logits don't match the original one." 
_lowerCamelCase = F"""resnet{"-".join(name.split("resnet" ) )}""" print(lowercase_ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=lowercase_ , ) # we can use the convnext one _lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=lowercase_ , ) print(F"""Pushed {checkpoint_name}""" ) def lowerCAmelCase_( lowercase_ : Path , lowercase_ : str = None , lowercase_ : bool = True ) -> Any: _lowerCamelCase = '''imagenet-1k-id2label.json''' _lowerCamelCase = 10_00 _lowerCamelCase = (1, num_labels) _lowerCamelCase = '''huggingface/label-files''' _lowerCamelCase = num_labels _lowerCamelCase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) ) _lowerCamelCase = {int(lowercase_ ): v for k, v in idalabel.items()} _lowerCamelCase = idalabel _lowerCamelCase = {v: k for k, v in idalabel.items()} _lowerCamelCase = partial(lowercase_ , num_labels=lowercase_ , idalabel=lowercase_ , labelaid=lowercase_ ) _lowerCamelCase = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='''basic''' ), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='''basic''' ), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ), } if model_name: convert_weight_and_push(lowercase_ , names_to_config[model_name] , lowercase_ , lowercase_ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) return config, expected_shape if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) __SCREAMING_SNAKE_CASE : str = parser.parse_args() __SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
661
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __SCREAMING_SNAKE_CASE : str = tuple[int, int] class lowerCamelCase_: '''simple docstring''' def __init__( self , lowerCamelCase__ , lowerCamelCase__ ): _lowerCamelCase = vertices _lowerCamelCase = { (min(lowerCamelCase__ ), max(lowerCamelCase__ )): weight for edge, weight in edges.items() } def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ): self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) _lowerCamelCase = weight def snake_case__ ( self ): _lowerCamelCase = Graph({min(self.vertices )} , {} ) _lowerCamelCase = 42 _lowerCamelCase = 42 _lowerCamelCase = 42 _lowerCamelCase = 42 while len(subgraph.vertices ) < len(self.vertices ): _lowerCamelCase = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _lowerCamelCase = edge _lowerCamelCase = weight subgraph.add_edge(lowerCamelCase__ , lowerCamelCase__ ) return subgraph def lowerCAmelCase_( lowercase_ : str = "p107_network.txt" ) -> int: _lowerCamelCase = os.path.abspath(os.path.dirname(lowercase_ ) ) _lowerCamelCase = os.path.join(lowercase_ , lowercase_ ) _lowerCamelCase = {} _lowerCamelCase = 42 _lowerCamelCase = 42 _lowerCamelCase = 42 with open(lowercase_ ) as f: _lowerCamelCase = f.read().strip().split('''\n''' ) _lowerCamelCase = [line.split(''',''' ) for line in data] for edgea in range(1 , len(lowercase_ ) ): for edgea in range(lowercase_ ): if adjaceny_matrix[edgea][edgea] != "-": _lowerCamelCase = int(adjaceny_matrix[edgea][edgea] ) _lowerCamelCase = Graph(set(range(len(lowercase_ ) ) ) , lowercase_ ) _lowerCamelCase = graph.prims_algorithm() _lowerCamelCase = sum(graph.edges.values() ) _lowerCamelCase = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(F"""{solution() = }""")
661
1
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __magic_name__ ( unittest.TestCase ): """simple docstring""" lowerCAmelCase : Optional[int] = inspect.getfile(accelerate.test_utils ) lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) lowerCAmelCase : Dict = ['accelerate', 'launch'] lowerCAmelCase : int = Path.home() / '.cache/huggingface/accelerate' lowerCAmelCase : str = 'default_config.yaml' lowerCAmelCase : Dict = config_folder / config_file lowerCAmelCase : Tuple = config_folder / '_default_config.yaml' lowerCAmelCase : Dict = Path('''tests/test_configs''' ) @classmethod def lowerCAmelCase ( cls : List[Any] ): """simple docstring""" if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def lowerCAmelCase ( cls : List[str] ): """simple docstring""" if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def lowerCAmelCase ( self : List[str] ): """simple docstring""" _UpperCamelCase: Union[str, Any] = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def lowerCAmelCase ( self : Optional[int] ): """simple docstring""" for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ): with self.subTest(config_file=_lowercase ): execute_subprocess_async( self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() ) def lowerCAmelCase ( self : List[Any] ): """simple docstring""" execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() ) class __magic_name__ ( unittest.TestCase ): """simple docstring""" lowerCAmelCase : Any = 'test-tpu' lowerCAmelCase : List[Any] = 'us-central1-a' lowerCAmelCase : int = 'ls' lowerCAmelCase : Optional[int] = ['accelerate', 'tpu-config'] lowerCAmelCase : str = 'cd /usr/share' lowerCAmelCase : Optional[int] = 'tests/test_samples/test_command_file.sh' lowerCAmelCase : List[str] = 'Running gcloud compute tpus tpu-vm ssh' def lowerCAmelCase ( self : List[str] ): """simple docstring""" _UpperCamelCase: str = run_command( self.cmd + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , ) def lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" _UpperCamelCase: int = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=_lowercase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , ) def lowerCAmelCase ( self : Any ): """simple docstring""" _UpperCamelCase: Optional[Any] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , ) def lowerCAmelCase ( self : Tuple ): """simple 
docstring""" _UpperCamelCase: Optional[Any] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , ) def lowerCAmelCase ( self : Tuple ): """simple docstring""" _UpperCamelCase: str = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--command''', '''echo \"Hello World\"''', '''--debug''', ] , return_stdout=_lowercase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , ) def lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" _UpperCamelCase: int = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , ) def lowerCAmelCase ( self : Dict ): """simple docstring""" _UpperCamelCase: Optional[int] = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command_file''', self.command_file, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=_lowercase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , ) def lowerCAmelCase ( self : int ): """simple docstring""" _UpperCamelCase: List[str] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , ) def lowerCAmelCase ( self : Any ): """simple docstring""" _UpperCamelCase: str = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--accelerate_version''', '''12.0.0''', '''--debug''', ] , return_stdout=_lowercase , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
701
import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor UpperCAmelCase_ = logging.get_logger(__name__) class __magic_name__ ( PerceiverImageProcessor ): """simple docstring""" def __init__( self , *_lowercase , **_kwargs ): """simple docstring""" warnings.warn( '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PerceiverImageProcessor instead.''' , FutureWarning , ) super().__init__(*_lowercase , **_kwargs )
264
0
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __UpperCAmelCase ( ProcessorMixin ): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'ChineseCLIPImageProcessor' tokenizer_class = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , image_processor=None , tokenizer=None , **kwargs ): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , FutureWarning , ) feature_extractor = kwargs.pop('''feature_extractor''' ) image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(image_processor , tokenizer ) self.current_processor = self.image_processor def __call__( self , text=None , images=None , return_tensors=None , **kwargs ): if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs ) if images is not None: image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs ) if text is not None and images is not None: encoding['''pixel_values'''] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors ) def batch_decode( self , *args , **kwargs ): return self.tokenizer.batch_decode(*args , **kwargs ) def decode( self , *args , **kwargs ): return self.tokenizer.decode(*args , **kwargs ) @property def model_input_names( self ): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def feature_extractor_class( self ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , ) return self.image_processor_class
274
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() A_ : List[Any] =logging.get_logger(__name__) A_ : Optional[int] ={ '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } A_ : Tuple =[ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def snake_case_ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Dict) -> Optional[Any]: for attribute in key.split('''.'''): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowerCAmelCase_ = '''lm_head''' lowerCAmelCase_ = getattr(__snake_case , __snake_case) if weight_type is not None: lowerCAmelCase_ = getattr(__snake_case , __snake_case).shape else: lowerCAmelCase_ = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCAmelCase_ = value elif weight_type == "weight_g": lowerCAmelCase_ = value elif weight_type == "weight_v": lowerCAmelCase_ = value elif weight_type == "bias": lowerCAmelCase_ = value else: lowerCAmelCase_ = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''') def snake_case_ ( __snake_case : Tuple , __snake_case : Any , __snake_case : Any) -> Tuple: lowerCAmelCase_ = [] lowerCAmelCase_ = fairseq_model.state_dict() lowerCAmelCase_ = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowerCAmelCase_ = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , ) lowerCAmelCase_ = True else: for key, mapped_key in MAPPING.items(): lowerCAmelCase_ = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]: lowerCAmelCase_ = True if "*" in mapped_key: lowerCAmelCase_ = name.split(__snake_case)[0].split('''.''')[-2] lowerCAmelCase_ = mapped_key.replace('''*''' , __snake_case) if "weight_g" in name: lowerCAmelCase_ = '''weight_g''' elif "weight_v" in name: lowerCAmelCase_ = '''weight_v''' elif "bias" in name: lowerCAmelCase_ = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCAmelCase_ = '''weight''' else: lowerCAmelCase_ = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case) continue if not is_used: unused_weights.append(__snake_case) logger.warning(F'''Unused weights: {unused_weights}''') def snake_case_ ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Dict) -> str: lowerCAmelCase_ = full_name.split('''conv_layers.''')[-1] lowerCAmelCase_ = name.split('''.''') lowerCAmelCase_ = int(items[0]) lowerCAmelCase_ = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowerCAmelCase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowerCAmelCase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) lowerCAmelCase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowerCAmelCase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') else: unused_weights.append(__snake_case) @torch.no_grad() def snake_case_ ( __snake_case : Tuple , __snake_case : Tuple , __snake_case : str=None , __snake_case : Union[str, Any]=None , __snake_case : Union[str, Any]=True) -> Any: if config_path is not None: lowerCAmelCase_ = UniSpeechConfig.from_pretrained(__snake_case) else: lowerCAmelCase_ = UniSpeechConfig() if is_finetuned: if dict_path: lowerCAmelCase_ = Dictionary.load_from_json(__snake_case) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCAmelCase_ = target_dict.pad_index lowerCAmelCase_ = target_dict.bos_index lowerCAmelCase_ = target_dict.eos_index lowerCAmelCase_ = len(target_dict.symbols) lowerCAmelCase_ = os.path.join(__snake_case , '''vocab.json''') if not os.path.isdir(__snake_case): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__snake_case)) return os.makedirs(__snake_case , exist_ok=__snake_case) lowerCAmelCase_ = target_dict.indices # fairseq has the <pad> and <s> switched lowerCAmelCase_ = 42 lowerCAmelCase_ = 43 with open(__snake_case , '''w''' , encoding='''utf-8''') as vocab_handle: json.dump(__snake_case , __snake_case) lowerCAmelCase_ = WavaVecaPhonemeCTCTokenizer( __snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__snake_case , ) lowerCAmelCase_ = True if config.feat_extract_norm == '''layer''' else False lowerCAmelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) lowerCAmelCase_ = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case) processor.save_pretrained(__snake_case) lowerCAmelCase_ = UniSpeechForCTC(__snake_case) else: lowerCAmelCase_ = UniSpeechForPreTraining(__snake_case) if is_finetuned: lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1]), '''w2v_path''': checkpoint_path}) else: lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) lowerCAmelCase_ = model[0].eval() recursively_load_weights(__snake_case , __snake_case , __snake_case) hf_unispeech.save_pretrained(__snake_case) if __name__ == "__main__": A_ : List[Any] =argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', 
action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) A_ : Optional[Any] =parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
274
1
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _lowercase : List[Any] =logging.get_logger(__name__) _lowercase : int =["""model.decoder.embed_positions.weights"""] def UpperCAmelCase ( lowercase__ : List[str] ): '''simple docstring''' if "emb" in name: a__ = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: a__ = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: a__ = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: a__ = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: a__ = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: a__ = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: a__ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: a__ = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: a__ = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: a__ = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: a__ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def UpperCAmelCase ( lowercase__ : OrderedDict , lowercase__ : int ): '''simple docstring''' a__ = list(state_dict.keys() ) a__ = {} for key in keys: a__ = state_dict.pop(lowercase__ ) a__ = rename_keys(lowercase__ ) if "in_proj_weight" in key: # split fused qkv proj a__ = val[:hidden_size, :] a__ = val[hidden_size : 2 * hidden_size, :] a__ = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: a__ = val else: a__ = val return state_dict, enc_dec_proj_state_dict def UpperCAmelCase ( lowercase__ : str ): '''simple docstring''' if checkpoint == "small": # default config values a__ = 1024 a__ = 24 a__ = 16 elif checkpoint == "medium": a__ = 1536 a__ = 48 a__ = 24 elif checkpoint == "large": a__ = 2048 a__ = 48 a__ = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) a__ = MusicgenDecoderConfig( hidden_size=lowercase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase__ , num_attention_heads=lowercase__ , ) return config @torch.no_grad() def UpperCAmelCase ( lowercase__ : Optional[int] , lowercase__ : Optional[Any]=None , lowercase__ : Tuple=None , lowercase__ : Dict="cpu" ): '''simple docstring''' a__ = MusicGen.get_pretrained(lowercase__ , device=lowercase__ ) a__ = decoder_config_from_checkpoint(lowercase__ ) a__ = fairseq_model.lm.state_dict() a__ , a__ = rename_state_dict( lowercase__ , hidden_size=decoder_config.hidden_size ) a__ = TaEncoderModel.from_pretrained("""t5-base""" ) a__ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) a__ = MusicgenForCausalLM(lowercase__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection a__ , a__ = decoder.load_state_dict(lowercase__ , strict=lowercase__ ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowercase__ ) if len(lowercase__ ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(lowercase__ ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model a__ = MusicgenForConditionalGeneration(text_encoder=lowercase__ , audio_encoder=lowercase__ , decoder=lowercase__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowercase__ ) # check we can do a forward pass a__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) a__ = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): a__ = model(input_ids=lowercase__ , decoder_input_ids=lowercase__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor a__ = AutoTokenizer.from_pretrained("""t5-base""" ) a__ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) a__ = MusicgenProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ ) # set the appropriate bos/pad token ids a__ = 2048 a__ = 2048 # set other default generation config params a__ = int(30 * audio_encoder.config.frame_rate ) a__ = True a__ = 3.0 if pytorch_dump_folder is not None: Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(lowercase__ ) processor.save_pretrained(lowercase__ ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(lowercase__ ) processor.push_to_hub(lowercase__ ) if __name__ == "__main__": _lowercase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) _lowercase : Union[str, Any] =parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
412
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowercase : int =logging.get_logger(__name__) _lowercase : List[str] ={ """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""", } class ResNetConfig ( BackboneConfigMixin , PretrainedConfig ): '''simple docstring''' model_type = 'resnet' layer_types = ['basic', 'bottleneck'] def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ): '''simple docstring''' super().__init__(**kwargs ) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) self.num_channels = num_channels self.embedding_size = embedding_size self.hidden_sizes = hidden_sizes self.depths = depths self.layer_type = layer_type self.hidden_act = hidden_act self.downsample_in_first_stage = downsample_in_first_stage self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )] self._out_features , self._out_indices = get_aligned_output_features_output_indices( out_features=out_features , out_indices=out_indices , stage_names=self.stage_names ) class ResNetOnnxConfig ( OnnxConfig ): '''simple docstring''' torch_onnx_minimum_version = version.parse('1.11' ) @property def inputs( self ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def atol_for_validation( self ): '''simple docstring''' return 1e-3
412
1
'''simple docstring''' import requests from bs4 import BeautifulSoup def get_citation( base_url : str , params : dict ): soup = BeautifulSoup(requests.get(base_url , params=params ).content , "html.parser" ) div = soup.find("div" , attrs={"class": "gs_ri"} ) anchors = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" ) return anchors[2].get_text() if __name__ == "__main__": params = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 30, "pages": "3979-3990", "year": 2018, "hl": "en", } print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
440
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _UpperCAmelCase ( PipelineTool ): '''simple docstring''' default_checkpoint = '''facebook/bart-large-mnli''' description = ( '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ''' '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. ''' '''It returns the most likely label in the list of provided `labels` for the input text.''' ) name = '''text_classifier''' pre_processor_class = AutoTokenizer model_class = AutoModelForSequenceClassification inputs = ['''text''', ['''text''']] outputs = ['''text'''] def setup( self ): super().setup() config = self.model.config self.entailment_id = -1 for idx, label in config.id2label.items(): if label.lower().startswith('entail' ): self.entailment_id = int(idx ) if self.entailment_id == -1: raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' ) def encode( self , text , labels ): self._labels = labels return self.pre_processor( [text] * len(labels ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , ) def decode( self , outputs ): logits = outputs.logits label_id = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
699
0
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return 1.0 / (1.0 + np.exp(-_outputs )) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Dict = np.max(_outputs , axis=-1 , keepdims=__UpperCAmelCase ) _lowercase : Tuple = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__UpperCAmelCase ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = "sigmoid" SCREAMING_SNAKE_CASE_ : List[str] = "softmax" SCREAMING_SNAKE_CASE_ : int = "none" @add_end_docstrings( snake_case , r"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : Union[str, Any] = ClassificationFunction.NONE def __init__( self ,**UpperCAmelCase_ ): super().__init__(**UpperCAmelCase_ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def lowerCamelCase__ ( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_="" ,**UpperCAmelCase_ ): # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" _lowercase : Union[str, Any] = tokenizer_kwargs _lowercase : str = {} if hasattr(self.model.config ,"""return_all_scores""" ) and return_all_scores is None: _lowercase : str = self.model.config.return_all_scores if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) or top_k is None: _lowercase : str = top_k _lowercase : str = False elif return_all_scores is not None: warnings.warn( """`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of""" """ `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" ,UpperCAmelCase_ ,) if return_all_scores: _lowercase : str = None else: _lowercase : Any = 1 if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Any = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: _lowercase : Union[str, Any] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): _lowercase : Union[str, Any] = super().__call__(*UpperCAmelCase_ ,**UpperCAmelCase_ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
_lowercase : List[Any] = """top_k""" not in kwargs if isinstance(args[0] ,UpperCAmelCase_ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ): _lowercase : List[Any] = self.framework if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): return self.tokenizer(**UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ) elif isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) and len(UpperCAmelCase_ ) == 1 and isinstance(inputs[0] ,UpperCAmelCase_ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ) elif isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( """The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a""" """ dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" ) return self.tokenizer(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return self.model(**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ): # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: _lowercase : Union[str, Any] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: _lowercase : List[str] = ClassificationFunction.SOFTMAX elif hasattr(self.model.config ,"""function_to_apply""" ) and function_to_apply is None: _lowercase : Optional[int] = self.model.config.function_to_apply else: _lowercase : List[Any] = ClassificationFunction.NONE _lowercase : Dict = model_outputs["""logits"""][0] _lowercase : Tuple = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: _lowercase : Tuple = sigmoid(UpperCAmelCase_ ) elif function_to_apply == ClassificationFunction.SOFTMAX: _lowercase : int = softmax(UpperCAmelCase_ ) elif function_to_apply == ClassificationFunction.NONE: _lowercase : str = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} _lowercase : Union[str, Any] = [ {"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(UpperCAmelCase_ ) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase_ : x["score"] ,reverse=UpperCAmelCase_ ) if top_k is not None: _lowercase : Dict = dict_scores[:top_k] return dict_scores
600
"""simple docstring""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): # Initialise PyTorch model _lowercase : int = AlbertConfig.from_json_file(__UpperCAmelCase ) print(F"""Building PyTorch model from configuration: {config}""" ) _lowercase : Tuple = AlbertForPreTraining(__UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_albert(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , __UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--albert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained ALBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase: Dict = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
600
1
from __future__ import annotations from typing import Any def evaluate_postfix( postfix_notation : list ) -> int: '''simple docstring''' if not postfix_notation: return 0 operations = {'''+''', '''-''', '''*''', '''/'''} stack = [] for token in postfix_notation: if token in operations: b , a = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(token ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
183
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class _UpperCAmelCase ( A__ ): """simple docstring""" lowercase__ = (IPNDMScheduler,) lowercase__ = (("""num_inference_steps""", 50),) def lowercase__ ( self : Union[str, Any], **lowerCamelCase : Tuple ): '''simple docstring''' lowercase__ = {'''num_train_timesteps''': 1_000} config.update(**lowerCamelCase ) return config def lowercase__ ( self : Any, lowerCamelCase : Any=0, **lowerCamelCase : List[str] ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop('''num_inference_steps''', lowerCamelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config(**lowerCamelCase ) lowercase__ = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(lowerCamelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase ) lowercase__ = scheduler_class.from_pretrained(lowerCamelCase ) new_scheduler.set_timesteps(lowerCamelCase ) # copy over dummy past residuals lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample lowercase__ = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample lowercase__ = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowercase__ ( self : str ): '''simple docstring''' pass def lowercase__ ( self : Dict, lowerCamelCase : Optional[int]=0, **lowerCamelCase : Dict ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop('''num_inference_steps''', lowerCamelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCamelCase ) scheduler.set_timesteps(lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] if time_step is None: lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase ) lowercase__ = scheduler_class.from_pretrained(lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample lowercase__ = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowercase__ = 
scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample lowercase__ = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowercase__ ( self : int, **lowerCamelCase : List[str] ): '''simple docstring''' lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**lowerCamelCase ) lowercase__ = scheduler_class(**lowerCamelCase ) lowercase__ = 10 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(lowerCamelCase, lowerCamelCase ) lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowercase__ = model(lowerCamelCase, lowerCamelCase ) lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] ): '''simple docstring''' lowercase__ = dict(self.forward_default_kwargs ) lowercase__ = kwargs.pop('''num_inference_steps''', lowerCamelCase ) for scheduler_class in self.scheduler_classes: lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCamelCase ) lowercase__ = self.dummy_sample lowercase__ = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCamelCase, '''set_timesteps''' ): scheduler.set_timesteps(lowerCamelCase ) elif num_inference_steps is not None and not hasattr(lowerCamelCase, '''set_timesteps''' ): lowercase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowercase__ = dummy_past_residuals[:] lowercase__ = scheduler.timesteps[5] lowercase__ = scheduler.timesteps[6] lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def lowercase__ ( self : Dict ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=lowerCamelCase, time_step=lowerCamelCase ) def lowercase__ ( self : str ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowerCamelCase, time_step=lowerCamelCase ) def lowercase__ ( self : List[str] ): '''simple docstring''' lowercase__ = self.full_loop() lowercase__ = torch.mean(torch.abs(lowerCamelCase ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
183
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case_ : Optional[Any] = logging.get_logger(__name__) def lowerCamelCase( a__ ,a__=False): _SCREAMING_SNAKE_CASE =[] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias")) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''deit.embeddings.cls_token'''), ('''dist_token''', '''deit.embeddings.distillation_token'''), ('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''deit.embeddings.position_embeddings'''), ]) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ]) # if just the base model, we should remove "deit" from all keys that start with "deit" _SCREAMING_SNAKE_CASE =[(pair[0], pair[1][4:]) if pair[1].startswith('''deit''') else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('''norm.weight''', '''deit.layernorm.weight'''), ('''norm.bias''', '''deit.layernorm.bias'''), ('''head.weight''', '''cls_classifier.weight'''), ('''head.bias''', '''cls_classifier.bias'''), ('''head_dist.weight''', '''distillation_classifier.weight'''), ('''head_dist.bias''', '''distillation_classifier.bias'''), ]) return rename_keys def lowerCamelCase( a__ ,a__ ,a__=False): for i in range(config.num_hidden_layers): if base_model: _SCREAMING_SNAKE_CASE ='''''' else: _SCREAMING_SNAKE_CASE ='''deit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _SCREAMING_SNAKE_CASE =state_dict.pop(f"blocks.{i}.attn.qkv.weight") _SCREAMING_SNAKE_CASE =state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE =in_proj_weight[ : config.hidden_size, : ] _SCREAMING_SNAKE_CASE =in_proj_bias[: config.hidden_size] _SCREAMING_SNAKE_CASE =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _SCREAMING_SNAKE_CASE 
=in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _SCREAMING_SNAKE_CASE =in_proj_weight[ -config.hidden_size :, : ] _SCREAMING_SNAKE_CASE =in_proj_bias[-config.hidden_size :] def lowerCamelCase( a__ ,a__ ,a__): _SCREAMING_SNAKE_CASE =dct.pop(a__) _SCREAMING_SNAKE_CASE =val def lowerCamelCase( ): _SCREAMING_SNAKE_CASE ='''http://images.cocodataset.org/val2017/000000039769.jpg''' _SCREAMING_SNAKE_CASE =Image.open(requests.get(a__ ,stream=a__).raw) return im @torch.no_grad() def lowerCamelCase( a__ ,a__): _SCREAMING_SNAKE_CASE =DeiTConfig() # all deit models have fine-tuned heads _SCREAMING_SNAKE_CASE =False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size _SCREAMING_SNAKE_CASE =1000 _SCREAMING_SNAKE_CASE ='''huggingface/label-files''' _SCREAMING_SNAKE_CASE ='''imagenet-1k-id2label.json''' _SCREAMING_SNAKE_CASE =json.load(open(hf_hub_download(a__ ,a__ ,repo_type='''dataset''') ,'''r''')) _SCREAMING_SNAKE_CASE ={int(a__): v for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE =idalabel _SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE =int(deit_name[-6:-4]) _SCREAMING_SNAKE_CASE =int(deit_name[-3:]) # size of the architecture if deit_name[9:].startswith('''tiny'''): _SCREAMING_SNAKE_CASE =192 _SCREAMING_SNAKE_CASE =768 _SCREAMING_SNAKE_CASE =12 _SCREAMING_SNAKE_CASE =3 elif deit_name[9:].startswith('''small'''): _SCREAMING_SNAKE_CASE =384 _SCREAMING_SNAKE_CASE =1536 _SCREAMING_SNAKE_CASE =12 _SCREAMING_SNAKE_CASE =6 if deit_name[9:].startswith('''base'''): pass elif deit_name[4:].startswith('''large'''): _SCREAMING_SNAKE_CASE =1024 _SCREAMING_SNAKE_CASE =4096 _SCREAMING_SNAKE_CASE =24 _SCREAMING_SNAKE_CASE =16 # load original model from timm _SCREAMING_SNAKE_CASE =timm.create_model(a__ ,pretrained=a__) timm_model.eval() # load state_dict of original model, remove and rename some keys _SCREAMING_SNAKE_CASE =timm_model.state_dict() _SCREAMING_SNAKE_CASE =create_rename_keys(a__ ,a__) for src, dest in rename_keys: rename_key(a__ ,a__ ,a__) read_in_q_k_v(a__ ,a__ ,a__) # load HuggingFace model _SCREAMING_SNAKE_CASE =DeiTForImageClassificationWithTeacher(a__).eval() model.load_state_dict(a__) # Check outputs on an image, prepared by DeiTImageProcessor _SCREAMING_SNAKE_CASE =int( (256 / 224) * config.image_size) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 _SCREAMING_SNAKE_CASE =DeiTImageProcessor(size=a__ ,crop_size=config.image_size) _SCREAMING_SNAKE_CASE =image_processor(images=prepare_img() ,return_tensors='''pt''') _SCREAMING_SNAKE_CASE =encoding['''pixel_values'''] _SCREAMING_SNAKE_CASE =model(a__) _SCREAMING_SNAKE_CASE =timm_model(a__) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(a__ ,outputs.logits ,atol=1e-3) Path(a__).mkdir(exist_ok=a__) print(f"Saving model {deit_name} to {pytorch_dump_folder_path}") model.save_pretrained(a__) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(a__) if __name__ == "__main__": snake_case_ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--deit_name''', default='''vit_deit_base_distilled_patch16_224''', type=str, help='''Name of the DeiT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) snake_case_ : int = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
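# Usage sketch for the conversion script above. The script filename and the
# output directory below are illustrative assumptions; the two flags are the
# ones defined by the argparse block above, using its documented default
# checkpoint name.
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224
#
# The converted weights could then be reloaded from the dump folder:
#
#   from transformers import DeiTForImageClassificationWithTeacher
#   model = DeiTForImageClassificationWithTeacher.from_pretrained(
#       "./deit-base-distilled-patch16-224"
#   )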
191
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: snake_case_ : Any = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class A__ ( unittest.TestCase ): def __init__( self : Tuple , _a : str , _a : List[str]=7 , _a : Union[str, Any]=3 , _a : List[str]=18 , _a : List[Any]=30 , _a : Optional[Any]=400 , _a : Dict=None , _a : Union[str, Any]=True , _a : List[str]=True , _a : Optional[Any]=None , ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 20, '''width''': 20} _SCREAMING_SNAKE_CASE =parent _SCREAMING_SNAKE_CASE =batch_size _SCREAMING_SNAKE_CASE =num_channels _SCREAMING_SNAKE_CASE =image_size _SCREAMING_SNAKE_CASE =min_resolution _SCREAMING_SNAKE_CASE =max_resolution _SCREAMING_SNAKE_CASE =size _SCREAMING_SNAKE_CASE =do_normalize _SCREAMING_SNAKE_CASE =do_convert_rgb _SCREAMING_SNAKE_CASE =[512, 1024, 2048, 4096] _SCREAMING_SNAKE_CASE =patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} def __UpperCamelCase ( self : int ) -> Any: """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __UpperCamelCase ( self : str ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE ='''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg''' _SCREAMING_SNAKE_CASE =Image.open(requests.get(_a , stream=_a ).raw ).convert('''RGB''' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." 
, ) @require_torch @require_vision class A__ ( UpperCamelCase__ , unittest.TestCase ): UpperCAmelCase = PixaStructImageProcessor if is_vision_available() else None def __UpperCamelCase ( self : int ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self ) @property def __UpperCamelCase ( self : int ) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self : str ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , '''do_normalize''' ) ) self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) ) def __UpperCamelCase ( self : int ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_dummy_image() _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) _SCREAMING_SNAKE_CASE =2048 _SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''pt''' , max_patches=_a ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1E-3 , rtol=1E-3 ) ) def __UpperCamelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PIL images _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='''pt''' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __UpperCamelCase ( self : Dict ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PIL images _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 _SCREAMING_SNAKE_CASE =True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(_a ): _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches _SCREAMING_SNAKE_CASE ='''Hello''' _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_a , header_text=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='''pt''' , max_patches=_a , header_text=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , 
) def __UpperCamelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='''pt''' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='''pt''' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." 
, ) @require_torch @require_vision class A__ ( UpperCamelCase__ , unittest.TestCase ): UpperCAmelCase = PixaStructImageProcessor if is_vision_available() else None def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self , num_channels=4 ) _SCREAMING_SNAKE_CASE =3 @property def __UpperCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self : Dict ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , '''do_normalize''' ) ) self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) ) def __UpperCamelCase ( self : Dict ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PIL images _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='''pt''' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
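# The assertions above repeatedly compute the expected width of a flattened
# patch as (patch_height * patch_width * num_channels) + 2: each patch
# contributes patch_h * patch_w * C pixel values, and Pix2Struct prepends two
# extra columns carrying the patch's row and column index. A minimal sketch of
# that arithmetic, assuming the tester defaults above (16x16 patches, 3
# channels; the 4-channel tester above likewise uses num_channels - 1 because
# the alpha channel is dropped):
def expected_flattened_patch_dim(patch_height: int = 16, patch_width: int = 16, num_channels: int = 3) -> int:
    # the +2 accounts for the row/column coordinate columns
    return patch_height * patch_width * num_channels + 2


assert expected_flattened_patch_dim() == 770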
191
1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase_ = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCAmelCase_ = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCAmelCase_ = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCAmelCase_ = { "facebook/dpr-ctx_encoder-single-nq-base": 5_12, "facebook/dpr-ctx_encoder-multiset-base": 5_12, } UpperCAmelCase_ = { "facebook/dpr-question_encoder-single-nq-base": 5_12, "facebook/dpr-question_encoder-multiset-base": 5_12, } UpperCAmelCase_ = { "facebook/dpr-reader-single-nq-base": 5_12, "facebook/dpr-reader-multiset-base": 5_12, } UpperCAmelCase_ = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } UpperCAmelCase_ = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } UpperCAmelCase_ = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class __UpperCamelCase ( A__ ): __A : Optional[int] = VOCAB_FILES_NAMES __A : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __A : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Optional[int] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __UpperCamelCase ( A__ ): __A : Dict = VOCAB_FILES_NAMES __A : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __A : Any = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase_ = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) UpperCAmelCase_ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) UpperCAmelCase_ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(A__ ) class __UpperCamelCase : def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): if titles is None and texts is None: return super().__call__( _UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , ) elif titles is None or texts is None: _UpperCAmelCase = titles if texts is None else texts return super().__call__( _UpperCamelCase , _UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , ) _UpperCAmelCase = titles if not isinstance(_UpperCamelCase , _UpperCamelCase ) else [titles] _UpperCAmelCase = texts if not isinstance(_UpperCamelCase , _UpperCamelCase ) else [texts] _UpperCAmelCase = len(_UpperCamelCase ) _UpperCAmelCase = questions if not isinstance(_UpperCamelCase , _UpperCamelCase ) else [questions] * n_passages if len(_UpperCamelCase ) != len(_UpperCamelCase ): raise ValueError( f'''There should be as many titles than texts but got {len(_UpperCamelCase )} titles and {len(_UpperCamelCase )} texts.''' ) _UpperCAmelCase = super().__call__(_UpperCamelCase , _UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase )['''input_ids'''] _UpperCAmelCase = super().__call__(_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase )['''input_ids'''] _UpperCAmelCase = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_UpperCamelCase , _UpperCamelCase ) ] } if return_attention_mask is not False: _UpperCAmelCase = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) _UpperCAmelCase = attention_mask return self.pad(_UpperCamelCase , padding=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors=_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 16 , _UpperCamelCase = 64 , _UpperCamelCase = 4 , ): _UpperCAmelCase = reader_input['''input_ids'''] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 
reader_output[:3] _UpperCAmelCase = len(_UpperCamelCase ) _UpperCAmelCase = sorted(range(_UpperCamelCase ) , reverse=_UpperCamelCase , key=relevance_logits.__getitem__ ) _UpperCAmelCase = [] for doc_id in sorted_docs: _UpperCAmelCase = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence _UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: _UpperCAmelCase = sequence_ids.index(self.pad_token_id ) else: _UpperCAmelCase = len(_UpperCamelCase ) _UpperCAmelCase = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCamelCase , top_spans=_UpperCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCamelCase , start_index=_UpperCamelCase , end_index=_UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(_UpperCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = [] for start_index, start_score in enumerate(_UpperCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) _UpperCAmelCase = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x[1] , reverse=_UpperCamelCase ) _UpperCAmelCase = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' ) _UpperCAmelCase = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_UpperCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A__ ) class __UpperCamelCase ( A__ , A__ ): __A : Dict = VOCAB_FILES_NAMES __A : Dict = READER_PRETRAINED_VOCAB_FILES_MAP __A : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Optional[Any] = READER_PRETRAINED_INIT_CONFIGURATION __A : Tuple = ["""input_ids""", """attention_mask"""]
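# A short usage sketch for the reader tokenizer defined above, following the
# published transformers API that these definitions correspond to (the
# question, title, and text strings are illustrative):
#
#   from transformers import DPRReader, DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#   print(best_spans[0].text)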
32
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    # Metaclass: collects every method marked with a key code and installs a
    # shared `handle_input` dispatcher on the class.
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        # Read one character and dispatch to the handler registered for it.
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """simple docstring"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
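# A minimal usage sketch for the machinery above (the menu class, its method
# names, and the chosen keys are illustrative assumptions):
#
#   @register
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           ...
#
#       @mark_multiple(KEYMAP["down"], KEYMAP["newline"])
#       def select_or_move_down(cls):
#           ...
#
#   # read one key press and dispatch to the method marked with that key
#   Menu.handle_input()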
32
1
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __lowerCamelCase : str = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class SCREAMING_SNAKE_CASE__ : """simple docstring""" a_ = field( default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} ) a_ = field( default=UpperCamelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) a_ = field( default=UpperCamelCase_ , metadata={"help": "The column name of the images in the files."} ) a_ = field(default=UpperCamelCase_ , metadata={"help": "A folder containing the training data."} ) a_ = field(default=UpperCamelCase_ , metadata={"help": "A folder containing the validation data."} ) a_ = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) a_ = field( default=UpperCamelCase_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) a_ = field( default=UpperCamelCase_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _lowercase ( self : List[Any] ): snake_case__ : int = {} if self.train_dir is not None: snake_case__ : Tuple = self.train_dir if self.validation_dir is not None: snake_case__ : Dict = self.validation_dir snake_case__ : str = data_files if data_files else None @dataclass class SCREAMING_SNAKE_CASE__ : """simple docstring""" a_ = field( default=UpperCamelCase_ , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) a_ = field( default=UpperCamelCase_ , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) a_ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) a_ = field(default=UpperCamelCase_ , metadata={"help": "Name or path of preprocessor config."} ) a_ = field( default=UpperCamelCase_ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } , ) a_ = field( default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) a_ = field( default=UpperCamelCase_ , metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = field( default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def SCREAMING_SNAKE_CASE ( snake_case_ : int ): snake_case__ : Union[str, Any] = torch.stack([example["pixel_values"] for example in examples] ) return {"pixel_values": pixel_values} def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case__, snake_case__, snake_case__ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case__, snake_case__, snake_case__ : Optional[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mae" , snake_case_ , snake_case_ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() snake_case__ : int = training_args.get_process_log_level() logger.setLevel(snake_case_ ) transformers.utils.logging.set_verbosity(snake_case_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. snake_case__ : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: snake_case__ : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. 
snake_case__ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. snake_case__ : Dict = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case_ ) and data_args.train_val_split > 0.0: snake_case__ : Optional[Any] = ds["train"].train_test_split(data_args.train_val_split ) snake_case__ : List[Any] = split["train"] snake_case__ : List[str] = split["test"] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case__ : Tuple = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: snake_case__ : Optional[int] = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case_ ) elif model_args.model_name_or_path: snake_case__ : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ ) else: snake_case__ : List[Any] = ViTMAEConfig() logger.warning("You are instantiating a new config instance from scratch." ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { "mask_ratio": model_args.mask_ratio, "norm_pix_loss": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: snake_case__ : Optional[int] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case_ ) elif model_args.model_name_or_path: snake_case__ : List[Any] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case_ ) else: snake_case__ : Optional[int] = ViTImageProcessor() # create model if model_args.model_name_or_path: snake_case__ : Optional[Any] = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) snake_case__ : Tuple = ViTMAEForPreTraining(snake_case_ ) if training_args.do_train: snake_case__ : Union[str, Any] = ds["train"].column_names else: snake_case__ : Any = ds["validation"].column_names if data_args.image_column_name is not None: snake_case__ : List[Any] = data_args.image_column_name elif "image" in column_names: snake_case__ : Tuple = "image" elif "img" in column_names: snake_case__ : List[str] = "img" else: snake_case__ : Any = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: snake_case__ : int = image_processor.size["shortest_edge"] else: snake_case__ : List[Any] = (image_processor.size["height"], image_processor.size["width"]) snake_case__ : Optional[Any] = Compose( [ Lambda(lambda snake_case_ : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(snake_case_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), 
RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(snake_case_ : Dict ): snake_case__ : Optional[Any] = [transforms(snake_case_ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: snake_case__ : Any = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case_ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: snake_case__ : Tuple = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case_ ) # Compute absolute learning rate snake_case__ : int = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: snake_case__ : Optional[Any] = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer snake_case__ : Optional[Any] = Trainer( model=snake_case_ , args=snake_case_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , ) # Training if training_args.do_train: snake_case__ : Tuple = None if training_args.resume_from_checkpoint is not None: snake_case__ : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: snake_case__ : Optional[int] = last_checkpoint snake_case__ : List[Any] = trainer.train(resume_from_checkpoint=snake_case_ ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: snake_case__ : int = trainer.evaluate() trainer.log_metrics("eval" , snake_case_ ) trainer.save_metrics("eval" , snake_case_ ) # Write model card and (optionally) push to hub snake_case__ : Union[str, Any] = { "tasks": "masked-auto-encoding", "dataset": data_args.dataset_name, "tags": ["masked-auto-encoding"], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case_ ) else: trainer.create_model_card(**snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
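# Usage sketch for the MAE pretraining script above (the output directory is
# an illustrative placeholder; the flags are defined by the dataclasses and
# TrainingArguments above, and the telemetry call names the script run_mae):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --base_learning_rate 1.5e-4 \
#       --do_train \
#       --do_eval
#
# Worked example of the learning-rate scaling implemented above: with a
# per-device batch of 64, 2 gradient-accumulation steps and 4 processes,
# total_train_batch_size = 64 * 2 * 4 = 512, so
# absolute_lr = 1.5e-4 * 512 / 256 = 3e-4.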
25
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __lowerCamelCase : Optional[int] = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ __lowerCamelCase : str = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ __lowerCamelCase : str = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ): snake_case__ : List[Any] = compute_mauve( p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , ) return out
25
1
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch A_ = True except ImportError: A_ = False try: from torch.hub import _get_torch_home A_ = _get_torch_home() except ImportError: A_ = os.path.expanduser( os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch""")) ) A_ = os.path.join(torch_cache_home, """transformers""") A_ = """https://cdn.huggingface.co""" A_ = """https://s3.amazonaws.com/models.huggingface.co/bert""" A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1]) A_ = os.path.join(PATH, """config.yaml""") A_ = os.path.join(PATH, """attributes.txt""") A_ = os.path.join(PATH, """objects.txt""") A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path) A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE) A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE) A_ = """pytorch_model.bin""" A_ = """config.yaml""" def lowercase ( lowerCAmelCase__=OBJECTS ,lowerCAmelCase__=ATTRIBUTES ): lowerCamelCase_ = [] with open(lowerCAmelCase__ ) as f: for object in f.readlines(): vg_classes.append(object.split(''',''' )[0].lower().strip() ) lowerCamelCase_ = [] with open(lowerCAmelCase__ ) as f: for object in f.readlines(): vg_attrs.append(object.split(''',''' )[0].lower().strip() ) return vg_classes, vg_attrs def lowercase ( lowerCAmelCase__ ): lowerCamelCase_ = OrderedDict() with open(lowerCAmelCase__ ,'''rb''' ) as f: lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model'''] for k in copy.deepcopy(list(ckp.keys() ) ): lowerCamelCase_ = ckp.pop(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,np.ndarray ): lowerCamelCase_ = torch.tensor(lowerCAmelCase__ ) else: assert isinstance(lowerCAmelCase__ ,torch.tensor ), type(lowerCAmelCase__ ) lowerCamelCase_ = v return r class __lowerCamelCase : a__: Union[str, Any] = {} def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ): lowerCamelCase_ = name lowerCamelCase_ = level lowerCamelCase_ = {} for k, v in dictionary.items(): if v is None: raise ValueError() lowerCamelCase_ = copy.deepcopy(UpperCAmelCase ) lowerCamelCase_ = copy.deepcopy(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 ) lowerCamelCase_ = v setattr(self , UpperCAmelCase , UpperCAmelCase ) lowerCamelCase_ = d def __repr__( self ): return str(list((self._pointer.keys()) ) ) def __setattr__( self , UpperCAmelCase , UpperCAmelCase ): lowerCamelCase_ = val lowerCamelCase_ = val lowerCamelCase_ = key.split('''.''' ) lowerCamelCase_ = len(UpperCAmelCase ) - 1 lowerCamelCase_ = self._pointer if len(UpperCAmelCase ) > 1: for i, l in enumerate(UpperCAmelCase ): if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ): setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase ) if l == last_level: lowerCamelCase_ = val else: lowerCamelCase_ = pointer[l] def 
UpperCAmelCase__ ( self ): return self._pointer def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ): with open(f"{file_name}" , '''w''' ) as stream: dump(UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ): with open(f"{file_name}" , '''w''' ) as stream: json.dump(UpperCAmelCase , UpperCAmelCase ) @staticmethod def UpperCAmelCase__ ( UpperCAmelCase ): with open(UpperCAmelCase ) as stream: lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase ) return data def __str__( self ): lowerCamelCase_ = ''' ''' if self._name != "root": lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n" else: lowerCamelCase_ = '''''' lowerCamelCase_ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(UpperCAmelCase , UpperCAmelCase ): r += f"{t * (self._level)}{v}\n" self._level += 1 else: r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n" lowerCamelCase_ = level return r[:-1] @classmethod def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ): lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase ) return cls(UpperCAmelCase ) @classmethod def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ): lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase ) lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase ) lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase ) lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase ) lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase ) if os.path.isdir(UpperCAmelCase ): lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ): lowerCamelCase_ = pretrained_model_name_or_path else: lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase ) try: # Load from URL or cache if already cached lowerCamelCase_ = cached_path( UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , ) # Load config dict if resolved_config_file is None: raise EnvironmentError lowerCamelCase_ = Config.load_yaml(UpperCAmelCase ) except EnvironmentError: lowerCamelCase_ = '''Can\'t load config for''' raise EnvironmentError(UpperCAmelCase ) if resolved_config_file == config_file: print('''loading configuration file from path''' ) else: print('''loading configuration file cache''' ) return Config.load_yaml(UpperCAmelCase ), kwargs def lowercase ( lowerCAmelCase__ ): lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device ) lowerCamelCase_ = in_tensor.numpy() lowerCamelCase_ = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), ( f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %" " element-wise mismatch" ) raise Exception('''tensors are all good''' ) # Hugging face functions below def lowercase ( lowerCAmelCase__ ): lowerCamelCase_ = urlparse(lowerCAmelCase__ ) return parsed.scheme in ("http", "https") def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ): lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX lowerCamelCase_ = '''/''' not in model_id if legacy_format: return 
f"{endpoint}/{model_id}-{filename}" else: return f"{endpoint}/{model_id}/{filename}" def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,): lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() ) elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): ua += "; " + user_agent lowerCamelCase_ = {'''user-agent''': ua} if resume_size > 0: lowerCamelCase_ = '''bytes=%d-''' % (resume_size,) lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ ) if response.status_code == 416: # Range not satisfiable return lowerCamelCase_ = response.headers.get('''Content-Length''' ) lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None lowerCamelCase_ = tqdm( unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(lowerCAmelCase__ ) ) temp_file.write(lowerCAmelCase__ ) progress.close() def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,): if cache_dir is None: lowerCamelCase_ = TRANSFORMERS_CACHE if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ = str(lowerCAmelCase__ ) os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ ) lowerCamelCase_ = None if not local_files_only: try: lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ ) if response.status_code == 200: lowerCamelCase_ = response.headers.get('''ETag''' ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ ) # get cache path to put the file lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(lowerCAmelCase__ ): return cache_path else: lowerCamelCase_ = [ file for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' ) if not file.endswith('''.json''' ) and not file.endswith('''.lock''' ) ] if len(lowerCAmelCase__ ) > 0: return os.path.join(lowerCAmelCase__ ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( '''Cannot find the requested files in the cached path and outgoing traffic has been''' ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\'''' ''' to False.''' ) return None # From now on, etag is not None. if os.path.exists(lowerCAmelCase__ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lowerCamelCase_ = cache_path + '''.lock''' with FileLock(lowerCAmelCase__ ): # If the download just completed while the lock was activated. 
if os.path.exists(lowerCAmelCase__ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: lowerCamelCase_ = cache_path + '''.incomplete''' @contextmanager def _resumable_file_manager(): with open(lowerCAmelCase__ ,'''a+b''' ) as f: yield f lowerCamelCase_ = _resumable_file_manager if os.path.exists(lowerCAmelCase__ ): lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size else: lowerCamelCase_ = 0 else: lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ ) lowerCamelCase_ = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( '''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,) http_get( lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,) os.replace(temp_file.name ,lowerCAmelCase__ ) lowerCamelCase_ = {'''url''': url, '''etag''': etag} lowerCamelCase_ = cache_path + '''.json''' with open(lowerCAmelCase__ ,'''w''' ) as meta_file: json.dump(lowerCAmelCase__ ,lowerCAmelCase__ ) return cache_path def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ): lowerCamelCase_ = url.encode('''utf-8''' ) lowerCamelCase_ = shaaaa(lowerCAmelCase__ ) lowerCamelCase_ = url_hash.hexdigest() if etag: lowerCamelCase_ = etag.encode('''utf-8''' ) lowerCamelCase_ = shaaaa(lowerCAmelCase__ ) filename += "." + etag_hash.hexdigest() if url.endswith('''.h5''' ): filename += ".h5" return filename def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,): if cache_dir is None: lowerCamelCase_ = TRANSFORMERS_CACHE if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ = str(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ = str(lowerCAmelCase__ ) if is_remote_url(lowerCAmelCase__ ): # URL, so get it from the cache (downloading if necessary) lowerCamelCase_ = get_from_cache( lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,) elif os.path.exists(lowerCAmelCase__ ): # File, and it exists. lowerCamelCase_ = url_or_filename elif urlparse(lowerCAmelCase__ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) ) else: # Something unknown raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) ) if extract_compressed_file: if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ): return output_path # Path where we extract compressed archives # We avoid '.' 
in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ ) lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted''' lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract: return output_path_extracted # Prevent parallel extractions lowerCamelCase_ = output_path + '''.lock''' with FileLock(lowerCAmelCase__ ): shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ ) os.makedirs(lowerCAmelCase__ ) if is_zipfile(lowerCAmelCase__ ): with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file: zip_file.extractall(lowerCAmelCase__ ) zip_file.close() elif tarfile.is_tarfile(lowerCAmelCase__ ): lowerCamelCase_ = tarfile.open(lowerCAmelCase__ ) tar_file.extractall(lowerCAmelCase__ ) tar_file.close() else: raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) ) return output_path_extracted return output_path def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ): assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) if os.path.isfile(lowerCAmelCase__ ): with open(lowerCAmelCase__ ) as f: lowerCamelCase_ = eval(f.read() ) else: lowerCamelCase_ = requests.get(lowerCAmelCase__ ) try: lowerCamelCase_ = requests.json() except Exception: lowerCamelCase_ = req.content.decode() assert data is not None, "could not connect" try: lowerCamelCase_ = eval(lowerCAmelCase__ ) except Exception: lowerCamelCase_ = data.split('''\n''' ) req.close() return data def lowercase ( lowerCAmelCase__ ): lowerCamelCase_ = requests.get(lowerCAmelCase__ ) lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) ) return img def lowercase ( lowerCAmelCase__ ): lowerCamelCase_ = url.split('''/''' )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(lowerCAmelCase__ ) with open(lowerCAmelCase__ ,'''rb''' ) as stream: lowerCamelCase_ = pkl.load(lowerCAmelCase__ ) lowerCamelCase_ = weights.pop('''model''' ) lowerCamelCase_ = {} for k, v in model.items(): lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ ) if "running_var" in k: lowerCamelCase_ = torch.tensor([0] ) lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' ) lowerCamelCase_ = zero return new def lowercase ( ): print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" ) def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ): assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) if os.path.isfile(lowerCAmelCase__ ): lowerCamelCase_ = cva.imread(lowerCAmelCase__ ) else: lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ ) assert img is not None, f"could not connect to: {im}" lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB ) if input_format == "RGB": lowerCamelCase_ = img[:, :, ::-1] return img def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ): return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
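# A minimal, self-contained sketch of the download-then-cache pattern that the
# helpers above implement. The names below (`download_to_cache`, `cache_dir`)
# are illustrative and not part of this file; only `requests` and `filelock`
# are assumed to be installed.
import os
import tempfile
from hashlib import sha256

import requests
from filelock import FileLock


def download_to_cache(url: str, cache_dir: str) -> str:
    os.makedirs(cache_dir, exist_ok=True)
    cache_path = os.path.join(cache_dir, sha256(url.encode("utf-8")).hexdigest())
    if os.path.exists(cache_path):
        return cache_path
    with FileLock(cache_path + ".lock"):  # prevent parallel downloads of the same file
        if os.path.exists(cache_path):  # download finished while we waited on the lock
            return cache_path
        # Stream to a temporary file first so an interrupted transfer never leaves
        # a corrupt entry at the final cache path; os.replace is atomic.
        with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tmp:
            with requests.get(url, stream=True) as response:
                response.raise_for_status()
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive chunks
                        tmp.write(chunk)
        os.replace(tmp.name, cache_path)
    return cache_path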
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCamelCase ( lowerCAmelCase ): a__: Any = (DDPMScheduler,) def UpperCAmelCase__ ( self , **UpperCAmelCase ): lowerCamelCase_ = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCAmelCase ) return config def UpperCAmelCase__ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase ) def UpperCAmelCase__ ( self ): for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase ) def UpperCAmelCase__ ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase ) def UpperCAmelCase__ ( self ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase ) def UpperCAmelCase__ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase ) def UpperCAmelCase__ ( self ): self.check_over_configs(thresholding=UpperCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , ) def UpperCAmelCase__ ( self ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase ) def UpperCAmelCase__ ( self ): for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase ) def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config() lowerCamelCase_ = scheduler_class(**UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config() lowerCamelCase_ = scheduler_class(**UpperCAmelCase ) lowerCamelCase_ = len(UpperCAmelCase ) lowerCamelCase_ = self.dummy_model() lowerCamelCase_ = self.dummy_sample_deter lowerCamelCase_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCamelCase_ = pred_prev_sample lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) ) lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCamelCase_ = scheduler_class(**UpperCAmelCase ) lowerCamelCase_ = len(UpperCAmelCase ) lowerCamelCase_ = self.dummy_model() lowerCamelCase_ = self.dummy_sample_deter lowerCamelCase_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. predict previous mean of sample x_t-1 lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCamelCase_ = pred_prev_sample lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) ) lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config() lowerCamelCase_ = scheduler_class(**UpperCAmelCase ) lowerCamelCase_ = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase ) lowerCamelCase_ = scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase ): if i == len(UpperCAmelCase ) - 1: lowerCamelCase_ = -1 else: lowerCamelCase_ = timesteps[i + 1] lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase ) lowerCamelCase_ = prev_t.item() self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config() lowerCamelCase_ = scheduler_class(**UpperCAmelCase ) lowerCamelCase_ = [100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=UpperCAmelCase ) def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config() lowerCamelCase_ = scheduler_class(**UpperCAmelCase ) lowerCamelCase_ = [100, 87, 50, 1, 0] lowerCamelCase_ = len(UpperCAmelCase ) with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase ) def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.scheduler_classes[0] lowerCamelCase_ = self.get_scheduler_config() lowerCamelCase_ = scheduler_class(**UpperCAmelCase ) lowerCamelCase_ = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=UpperCAmelCase )
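# Condensed sketch of the reverse-diffusion loop these tests exercise; the
# constant-zero "model output" is a stand-in for a trained UNet's noise
# prediction, which is out of scope for a scheduler test.
if __name__ == "__main__":
    scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in reversed(range(scheduler.config.num_train_timesteps)):
        model_output = torch.zeros_like(sample)  # placeholder noise prediction
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample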
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __a : def __init__( self , a__ , a__=13 , a__=32 , a__=3 , a__=4 , a__=[10, 20, 30, 40] , a__=[2, 2, 3, 2] , a__=True , a__=True , a__=37 , a__="gelu" , a__=10 , a__=0.02 , a__=["stage2", "stage3", "stage4"] , a__=3 , a__=None , ): _lowerCamelCase = parent _lowerCamelCase = batch_size _lowerCamelCase = image_size _lowerCamelCase = num_channels _lowerCamelCase = num_stages _lowerCamelCase = hidden_sizes _lowerCamelCase = depths _lowerCamelCase = is_training _lowerCamelCase = use_labels _lowerCamelCase = intermediate_size _lowerCamelCase = hidden_act _lowerCamelCase = type_sequence_label_size _lowerCamelCase = initializer_range _lowerCamelCase = out_features _lowerCamelCase = num_labels _lowerCamelCase = scope _lowerCamelCase = num_stages def snake_case_ ( self ): _lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase = None if self.use_labels: _lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase = self.get_config() return config, pixel_values, labels def snake_case_ ( self ): return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def snake_case_ ( self ): return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowerCamelCase__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=lowerCamelCase__ , loss_ignore_index=2_55 , num_labels=self.num_labels , ) def snake_case_ ( self , a__ , a__ , a__ ): _lowerCamelCase = UperNetForSemanticSegmentation(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowerCamelCase = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def snake_case_ ( self ): _lowerCamelCase = self.prepare_config_and_inputs() ( _lowerCamelCase ) = config_and_inputs _lowerCamelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE__ : str = (UperNetForSemanticSegmentation,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Dict = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : List[Any] = False SCREAMING_SNAKE_CASE__ : Optional[int] = False 
SCREAMING_SNAKE_CASE__ : Optional[int] = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Optional[int] = False def snake_case_ ( self ): _lowerCamelCase = UperNetModelTester(self ) _lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def snake_case_ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case_ ( self ): return def snake_case_ ( self ): _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase = model_class(lowerCamelCase__ ) _lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase = [*signature.parameters.keys()] _lowerCamelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def snake_case_ ( self ): _lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) @unittest.skip(reason='UperNet does not use inputs_embeds' ) def snake_case_ ( self ): pass @unittest.skip(reason='UperNet does not support input and output embeddings' ) def snake_case_ ( self ): pass @unittest.skip(reason='UperNet does not have a base model' ) def snake_case_ ( self ): pass @unittest.skip(reason='UperNet does not have a base model' ) def snake_case_ ( self ): pass @require_torch_multi_gpu @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def snake_case_ ( self ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def snake_case_ ( self ): pass def snake_case_ ( self ): def check_hidden_states_output(a__ , a__ , a__ ): _lowerCamelCase = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): _lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) _lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase = True check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase = True check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def snake_case_ ( self ): _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase = _config_zero_init(lowerCamelCase__ ) _lowerCamelCase = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _lowerCamelCase = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip(reason='UperNet does not have tied weights' ) def snake_case_ ( self ): pass @slow def snake_case_ ( self ): for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase = UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE_ ( )-> int: _lowerCamelCase = hf_hub_download( repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' ) _lowerCamelCase = Image.open(__lowerCAmelCase ).convert('RGB' ) return image @require_torch @require_vision @slow class __a ( unittest.TestCase ): def snake_case_ ( self ): _lowerCamelCase = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' ) _lowerCamelCase = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(lowerCamelCase__ ) _lowerCamelCase = prepare_img() _lowerCamelCase = processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) with torch.no_grad(): _lowerCamelCase = model(**lowerCamelCase__ ) _lowerCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) _lowerCamelCase = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) ) def snake_case_ ( self ): _lowerCamelCase = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' ) _lowerCamelCase = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(lowerCamelCase__ ) _lowerCamelCase = prepare_img() _lowerCamelCase = processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) with torch.no_grad(): 
_lowerCamelCase = model(**lowerCamelCase__ ) _lowerCamelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) _lowerCamelCase = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
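# Minimal inference sketch mirroring the integration tests above: run the same
# ConvNeXt-backed checkpoint on one image and reduce the logits to a per-pixel
# class map. "example.jpg" is a placeholder for any RGB image on disk.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    inputs = processor(images=Image.open("example.jpg").convert("RGB"), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, num_labels, 512, 512)
    seg_map = logits.argmax(dim=1)[0]  # integer class id per pixel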
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def SCREAMING_SNAKE_CASE_ ( )-> int: _lowerCamelCase = { 'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'], 'path': ['test_1.py', 'test_2.py', 'unit_test.py'], 'content': ['a ' * 20, 'a ' * 30, 'b ' * 7], } _lowerCamelCase = Dataset.from_dict(snake_case ) return dataset class __a ( lowerCAmelCase__ ): def snake_case_ ( self ): _lowerCamelCase = get_dataset() _lowerCamelCase = make_duplicate_clusters(a__ , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def snake_case_ ( self ): _lowerCamelCase = get_dataset() _lowerCamelCase , _lowerCamelCase = deduplicate_dataset(a__ ) self.assertEqual(len(a__ ) , 2 ) print(a__ ) self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 ) self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , a__ )
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm lowercase_ = re.compile("""[^A-Za-z_0-9]""") # parameters used in DuplicationIndex lowercase_ = 10 lowercase_ = 256 def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if len(_lowercase ) < MIN_NUM_TOKENS: return None lowercase__ = MinHash(num_perm=_lowercase ) for token in set(_lowercase ): min_hash.update(token.encode() ) return min_hash def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return {t for t in NON_ALPHA.split(_lowercase ) if len(t.strip() ) > 0} class _snake_case : def __init__( self : Optional[int], *, __lowercase : float = 0.85, ): lowercase__ = duplication_jaccard_threshold lowercase__ = NUM_PERM lowercase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm ) lowercase__ = defaultdict(a__ ) def A__ ( self : Any, __lowercase : Tuple, __lowercase : MinHash ): lowercase__ = self._index.query(a__ ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(a__, a__ ) if len(a__ ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(a__ ) break else: self._duplicate_clusters[close_duplicates[0]].add(a__ ) def A__ ( self : str ): lowercase__ = [] for base, duplicates in self._duplicate_clusters.items(): lowercase__ = [base] + list(a__ ) # reformat the cluster to be a list of dict lowercase__ = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster] duplicate_clusters.append(a__ ) return duplicate_clusters def A__ ( self : List[str], __lowercase : Any ): lowercase__ = self.get_duplicate_clusters() with open(a__, "w" ) as f: json.dump(a__, a__ ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ , lowercase__ = element lowercase__ = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_lowercase , max_queue_size=1_0000 ) , chunksize=100 , ): if data is not None: yield data def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = DuplicationIndex(duplication_jaccard_threshold=_lowercase ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowercase ) ) , max_queue_size=100 ) ): di.add(_lowercase , _lowercase ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = get_tokens(_lowercase ) lowercase__ = get_tokens(_lowercase ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) lowercase_ = None def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = [] for elementa in cluster: lowercase__ = _shared_dataset[elementa["base_index"]]["content"] for elementa in extremes: lowercase__ = _shared_dataset[elementa["base_index"]]["content"] if jaccard_similarity(_lowercase , _lowercase ) >= jaccard_threshold: elementa["copies"] += 1 break else: lowercase__ = 1 extremes.append(_lowercase ) return extremes def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): global _shared_dataset lowercase__ = dataset lowercase__ = [] lowercase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=_lowercase ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _lowercase , _lowercase , ) , total=len(_lowercase ) , ): extremes_list.append(_lowercase ) return extremes_list def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.85 ): lowercase__ = make_duplicate_clusters(_lowercase , _lowercase ) lowercase__ = {x["base_index"] for cluster in duplicate_clusters for x in cluster} lowercase__ = {} lowercase__ = find_extremes(_lowercase , _lowercase , _lowercase ) for extremes in extremes_clusters: for element in extremes: lowercase__ = element lowercase__ = duplicate_indices - set(extreme_dict.keys() ) lowercase__ = dataset.filter(lambda SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : idx not in remove_indices , with_indices=_lowercase ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: lowercase__ = element["base_index"] in extreme_dict if element["is_extreme"]: lowercase__ = extreme_dict[element["base_index"]]["copies"] print(f'''Original dataset size: {len(_lowercase )}''' ) print(f'''Number of duplicate clusters: {len(_lowercase )}''' ) print(f'''Files in duplicate cluster: {len(_lowercase )}''' ) print(f'''Unique files in duplicate cluster: {len(_lowercase )}''' ) print(f'''Filtered dataset size: {len(_lowercase )}''' ) return ds_filter, duplicate_clusters
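# Usage sketch. The function names in this dump are mangled; the public entry
# point is `deduplicate_dataset` in the original module, which is the name the
# accompanying tests import. It returns the filtered dataset plus the duplicate
# clusters annotated with `copies` and `is_extreme`.
if __name__ == "__main__":
    toy = Dataset.from_dict(
        {
            "repo_name": ["r1", "r2", "r3"],
            "path": ["a.py", "b.py", "c.py"],
            "content": ["a " * 20, "a " * 30, "b " * 7],
        }
    )
    deduped, clusters = deduplicate_dataset(toy, 0.85)
    print(len(deduped), len(clusters))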
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _lowerCAmelCase (_lowercase ): """simple docstring""" return x + 2 class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase_ ( self : Union[str, Any] ): a__ = "x = 3" a__ = {} a__ = evaluate(a__ ,{} ,state=a__ ) assert result == 3 self.assertDictEqual(a__ ,{"x": 3} ) a__ = "x = y" a__ = {"y": 5} a__ = evaluate(a__ ,{} ,state=a__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(a__ ,{"x": 5, "y": 5} ) def lowerCAmelCase_ ( self : str ): a__ = "y = add_two(x)" a__ = {"x": 3} a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ ) assert result == 5 self.assertDictEqual(a__ ,{"x": 3, "y": 5} ) # Won't work without the tool with CaptureStdout() as out: a__ = evaluate(a__ ,{} ,state=a__ ) assert result is None assert "tried to execute add_two" in out.out def lowerCAmelCase_ ( self : Any ): a__ = "x = 3" a__ = {} a__ = evaluate(a__ ,{} ,state=a__ ) assert result == 3 self.assertDictEqual(a__ ,{"x": 3} ) def lowerCAmelCase_ ( self : Dict ): a__ = "test_dict = {'x': x, 'y': add_two(x)}" a__ = {"x": 3} a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ ) self.assertDictEqual(a__ ,{"x": 3, "y": 5} ) self.assertDictEqual(a__ ,{"x": 3, "test_dict": {"x": 3, "y": 5}} ) def lowerCAmelCase_ ( self : Dict ): a__ = "x = 3\ny = 5" a__ = {} a__ = evaluate(a__ ,{} ,state=a__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(a__ ,{"x": 3, "y": 5} ) def lowerCAmelCase_ ( self : str ): a__ = "text = f'This is x: {x}.'" a__ = {"x": 3} a__ = evaluate(a__ ,{} ,state=a__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(a__ ,{"x": 3, "text": "This is x: 3."} ) def lowerCAmelCase_ ( self : Union[str, Any] ): a__ = "if x <= 3:\n y = 2\nelse:\n y = 5" a__ = {"x": 3} a__ = evaluate(a__ ,{} ,state=a__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(a__ ,{"x": 3, "y": 2} ) a__ = {"x": 8} a__ = evaluate(a__ ,{} ,state=a__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(a__ ,{"x": 8, "y": 5} ) def lowerCAmelCase_ ( self : List[Any] ): a__ = "test_list = [x, add_two(x)]" a__ = {"x": 3} a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ ) self.assertListEqual(a__ ,[3, 5] ) self.assertDictEqual(a__ ,{"x": 3, "test_list": [3, 5]} ) def lowerCAmelCase_ ( self : Any ): a__ = "y = x" a__ = {"x": 3} a__ = evaluate(a__ ,{} ,state=a__ ) assert result == 3 self.assertDictEqual(a__ ,{"x": 3, "y": 3} ) def lowerCAmelCase_ ( self : Tuple ): a__ = "test_list = [x, add_two(x)]\ntest_list[1]" a__ = {"x": 3} a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ ) assert result == 5 self.assertDictEqual(a__ ,{"x": 3, "test_list": [3, 5]} ) a__ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" a__ = {"x": 3} a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ ) assert result == 5 self.assertDictEqual(a__ ,{"x": 3, "test_dict": {"x": 3, "y": 5}} ) def lowerCAmelCase_ ( self : List[Any] ): a__ = "x = 0\nfor i in range(3):\n x = i" a__ = {} a__ = evaluate(a__ ,{"range": range} ,state=a__ ) assert result == 2 self.assertDictEqual(a__ ,{"x": 2, "i": 2} )
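# Condensed usage sketch of the interpreter under test: tools are passed as a
# plain dict and `state` is mutated in place by every assignment, exactly as
# the cases above assert (the lambda here stands in for the add_two helper).
if __name__ == "__main__":
    state = {"x": 3}
    result = evaluate("y = add_two(x)", {"add_two": lambda n: n + 2}, state=state)
    assert result == 5 and state == {"x": 3, "y": 5}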
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : int = { 'post_extract_proj': 'feature_projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.upsample.0': 'encoder.upsample.projection', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): for attribute in key.split("." ): A = getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: A = getattr(UpperCamelCase , UpperCamelCase ).shape else: A = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": A = value elif weight_type == "weight_g": A = value elif weight_type == "weight_v": A = value elif weight_type == "bias": A = value else: A = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = [] A = fairseq_model.state_dict() A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): A = False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , ) A = True else: for key, mapped_key in MAPPING.items(): A = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A = True if "*" in mapped_key: A = name.split(UpperCamelCase )[0].split("." )[-2] A = mapped_key.replace("*" , UpperCamelCase ) if "weight_g" in name: A = "weight_g" elif "weight_v" in name: A = "weight_v" elif "weight" in name: A = "weight" elif "bias" in name: A = "bias" else: A = None set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(F"Unused weights: {unused_weights}" ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = full_name.split("conv_layers." )[-1] A = name.split("." ) A = int(items[0] ) A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." 
) A = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) A = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) A = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(UpperCamelCase ) def A__ ( UpperCamelCase , UpperCamelCase ): A = SEWConfig() if is_finetuned: A = model.wav_encoder.wav_model.cfg else: A = model.cfg A = fs_config.conv_bias A = eval(fs_config.conv_feature_layers ) A = [x[0] for x in conv_layers] A = [x[1] for x in conv_layers] A = [x[2] for x in conv_layers] A = "gelu" A = "layer" if fs_config.extractor_mode == "layer_norm" else "group" A = 0.0 A = fs_config.activation_fn.name A = fs_config.encoder_embed_dim A = 0.02 A = fs_config.encoder_ffn_embed_dim A = 1E-5 A = fs_config.encoder_layerdrop A = fs_config.encoder_attention_heads A = fs_config.conv_pos_groups A = fs_config.conv_pos A = len(UpperCamelCase ) A = fs_config.encoder_layers A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: A = model.cfg A = fs_config.final_dropout A = fs_config.layerdrop A = fs_config.activation_dropout A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 A = fs_config.attention_dropout A = fs_config.dropout_input A = fs_config.dropout A = fs_config.mask_channel_length A = fs_config.mask_channel_prob A = fs_config.mask_length A = fs_config.mask_prob A = "Wav2Vec2FeatureExtractor" A = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True ): if is_finetuned: A, A, A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: A, A, A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: A = SEWConfig.from_pretrained(UpperCamelCase ) else: A = convert_config(model[0] , UpperCamelCase ) A = model[0].eval() A = True if config.feat_extract_norm == "layer" else False A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCamelCase , return_attention_mask=UpperCamelCase , ) if is_finetuned: if dict_path: A = Dictionary.load(UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq A = target_dict.pad_index A = target_dict.bos_index A = target_dict.pad_index A = target_dict.bos_index A = target_dict.eos_index A = 
len(target_dict.symbols ) A = os.path.join(UpperCamelCase , "vocab.json" ) if not os.path.isdir(UpperCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCamelCase ) ) return os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with open(UpperCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , UpperCamelCase ) A = WavaVecaCTCTokenizer( UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCamelCase , ) A = WavaVecaProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) A = SEWForCTC(UpperCamelCase ) else: A = SEWModel(UpperCamelCase ) feature_extractor.save_pretrained(UpperCamelCase ) recursively_load_weights(UpperCamelCase , UpperCamelCase , UpperCamelCase ) hf_model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _snake_case : int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) _snake_case : str = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
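# Example invocation (the script filename and all paths below are placeholders
# for a real fairseq checkpoint and output directory):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-converted \
#       --dict_path /path/to/dict.ltr.txt \
#       --is_finetuned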
"""simple docstring""" import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _UpperCAmelCase ( lowercase_ , lowercase_ ): @register_to_config def __init__( self :Union[str, Any] , *, __UpperCamelCase :int = 4 , __UpperCamelCase :int = 7_68 , __UpperCamelCase :int , __UpperCamelCase :int , ): super().__init__() A = nn.Parameter(torch.zeros(__UpperCamelCase ) ) # parameters for additional clip time embeddings A = nn.Linear(__UpperCamelCase , __UpperCamelCase ) A = nn.Linear(__UpperCamelCase , __UpperCamelCase ) # parameters for encoder hidden states A = clip_extra_context_tokens A = nn.Linear( __UpperCamelCase , self.clip_extra_context_tokens * cross_attention_dim ) A = nn.Linear(__UpperCamelCase , __UpperCamelCase ) A = nn.LayerNorm(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , *, __UpperCamelCase :Tuple , __UpperCamelCase :Any , __UpperCamelCase :int , __UpperCamelCase :int ): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings A = image_embeddings.shape[0] A = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) A = classifier_free_guidance_embeddings.expand( __UpperCamelCase , -1 ) A = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] A = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... A = self.embedding_proj(__UpperCamelCase ) A = self.clip_image_embeddings_project_to_time_embeddings(__UpperCamelCase ) A = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" A = self.clip_extra_context_tokens_proj(__UpperCamelCase ) A = clip_extra_context_tokens.reshape(__UpperCamelCase , -1 , self.clip_extra_context_tokens ) A = clip_extra_context_tokens.permute(0 , 2 , 1 ) A = self.encoder_hidden_states_proj(__UpperCamelCase ) A = self.text_encoder_hidden_states_norm(__UpperCamelCase ) A = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) snake_case = logging.getLogger(__name__) def snake_case ( lowerCAmelCase_ ) -> List[str]: _snake_case = git.Repo(search_parent_directories=lowerCAmelCase_ ) _snake_case = { '''repo_id''': str(lowerCAmelCase_ ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(lowerCAmelCase_ , '''git_log.json''' ) , '''w''' ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=4 ) def snake_case ( lowerCAmelCase_ ) -> int: if params.n_gpu <= 0: _snake_case = 0 _snake_case = -1 _snake_case = True _snake_case = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 _snake_case = int(os.environ['''WORLD_SIZE'''] ) _snake_case = int(os.environ['''N_GPU_NODE'''] ) _snake_case = int(os.environ['''RANK'''] ) # number of nodes / node ID _snake_case = params.world_size // params.n_gpu_per_node _snake_case = params.global_rank // params.n_gpu_per_node _snake_case = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 _snake_case = 1 _snake_case = 0 _snake_case = 0 _snake_case = 0 _snake_case = 1 _snake_case = 1 _snake_case = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode _snake_case = params.node_id == 0 and params.local_rank == 0 _snake_case = params.n_nodes > 1 # summary _snake_case = f"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def snake_case ( lowerCAmelCase_ ) -> Dict: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
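# Worked example for the helper above: a 10 mH inductor with a 1 uF capacitor
# resonates near 1.59 kHz, since f = 1 / (2 * pi * sqrt(L * C)).
if __name__ == "__main__":
    label, freq = resonant_frequency(inductance=0.01, capacitance=1e-6)
    print(label, f"{freq:.1f} Hz")  # Resonant frequency 1591.5 Hz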
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float: """simple docstring""" lowerCAmelCase__ :Optional[Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def __A () ->str: """simple docstring""" print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :int = (UnCLIPScheduler,) def snake_case ( self , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = { 'num_train_timesteps': 1_0_0_0, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**__UpperCAmelCase ) return config def snake_case ( self ): '''simple docstring''' for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for clip_sample_range in [1, 5, 1_0, 2_0]: self.check_over_configs(clip_sample_range=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for time_step in [0, 5_0_0, 9_9_9]: for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=__UpperCAmelCase , prev_timestep=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self.scheduler_classes[0] lowerCAmelCase__ :Tuple = self.get_scheduler_config(variance_type='fixed_small_log' ) lowerCAmelCase__ :int = scheduler_class(**__UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_54_96_25 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_99_49_87 ) ) < 1E-5 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = self.scheduler_classes[0] lowerCAmelCase__ :List[Any] = self.get_scheduler_config(variance_type='learned_range' ) lowerCAmelCase__ :Any = scheduler_class(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = 0.5 assert scheduler._get_variance(1 , predicted_variance=__UpperCAmelCase ) - -10.1_71_27_90 < 1E-5 assert scheduler._get_variance(4_8_7 , predicted_variance=__UpperCAmelCase ) - -5.7_99_80_52 < 1E-5 assert scheduler._get_variance(9_9_9 , predicted_variance=__UpperCAmelCase ) - -0.0_01_00_11 < 1E-5 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = self.scheduler_classes[0] lowerCAmelCase__ :Any = self.get_scheduler_config() lowerCAmelCase__ :Any = scheduler_class(**__UpperCAmelCase ) lowerCAmelCase__ :str = scheduler.timesteps lowerCAmelCase__ :Dict = self.dummy_model() lowerCAmelCase__ :Optional[Any] = self.dummy_sample_deter lowerCAmelCase__ :Optional[Any] = torch.manual_seed(0 ) for i, t in enumerate(__UpperCAmelCase ): # 1. predict noise residual lowerCAmelCase__ :Any = model(__UpperCAmelCase , __UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase__ :Any = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample lowerCAmelCase__ :Dict = pred_prev_sample lowerCAmelCase__ :Tuple = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ :List[str] = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2 assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.scheduler_classes[0] lowerCAmelCase__ :Tuple = self.get_scheduler_config() lowerCAmelCase__ :str = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(2_5 ) lowerCAmelCase__ :List[Any] = scheduler.timesteps lowerCAmelCase__ :Union[str, Any] = self.dummy_model() lowerCAmelCase__ :List[Any] = self.dummy_sample_deter lowerCAmelCase__ :int = torch.manual_seed(0 ) for i, t in enumerate(__UpperCAmelCase ): # 1. predict noise residual lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase ) if i + 1 == timesteps.shape[0]: lowerCAmelCase__ :Optional[Any] = None else: lowerCAmelCase__ :int = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowerCAmelCase__ :Any = scheduler.step( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , prev_timestep=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample lowerCAmelCase__ :Dict = pred_prev_sample lowerCAmelCase__ :int = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ :Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2 assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3 def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' pass
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the max_n-th convergent of the
    continued fraction for e (Project Euler problem 65)."""
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
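# Sanity check from the problem statement: the numerator of the 10th convergent
# of e is 1457, and 1 + 4 + 5 + 7 = 17.
if __name__ == "__main__":
    assert solution(10) == 17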
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
"""simple docstring""" import sys def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" A_ : Dict = len(_UpperCAmelCase ) A_ : int = [[0 for x in range(_UpperCAmelCase )] for x in range(_UpperCAmelCase )] A_ : Tuple = [[0 for x in range(_UpperCAmelCase )] for x in range(_UpperCAmelCase )] for chain_length in range(2 , _UpperCAmelCase ): for a in range(1 , n - chain_length + 1 ): A_ : Optional[Any] = a + chain_length - 1 A_ : List[str] = sys.maxsize for c in range(_UpperCAmelCase , _UpperCAmelCase ): A_ : Any = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: A_ : Optional[Any] = cost A_ : Optional[int] = c return matrix, sol def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" if i == j: print('A' + str(_UpperCAmelCase ) , end=' ' ) else: print('(' , end=' ' ) print_optiomal_solution(_UpperCAmelCase , _UpperCAmelCase , optimal_solution[i][j] ) print_optiomal_solution(_UpperCAmelCase , optimal_solution[i][j] + 1 , _UpperCAmelCase ) print(')' , end=' ' ) def UpperCAmelCase__ ( ): """simple docstring""" A_ : Optional[Any] = [30, 35, 15, 5, 10, 20, 25] A_ : Optional[Any] = len(_UpperCAmelCase ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 A_ , A_ : int = matrix_chain_order(_UpperCAmelCase ) print('No. of Operation required: ' + str(matrix[1][n - 1] ) ) print_optiomal_solution(_UpperCAmelCase , 1 , n - 1 ) if __name__ == "__main__": main()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) lowerCamelCase_ : Optional[Any] = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Union[str, Any] = ['BeitFeatureExtractor'] lowerCamelCase_ : Optional[Any] = ['BeitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Optional[int] = [ 'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BeitForImageClassification', 'BeitForMaskedImageModeling', 'BeitForSemanticSegmentation', 'BeitModel', 'BeitPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Union[str, Any] = [ 'FlaxBeitForImageClassification', 'FlaxBeitForMaskedImageModeling', 'FlaxBeitModel', 'FlaxBeitPreTrainedModel', ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants _UpperCAmelCase : int = Mapping[str, np.ndarray] _UpperCAmelCase : Optional[Any] = Mapping[str, Any] # Is a nested dict. _UpperCAmelCase : Optional[Any] = 0.01 @dataclasses.dataclass(frozen=snake_case__ ) class lowerCAmelCase_ : UpperCamelCase_ :np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. UpperCamelCase_ :np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. UpperCamelCase_ :np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. UpperCamelCase_ :np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. UpperCamelCase_ :np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions UpperCamelCase_ :Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files UpperCamelCase_ :Optional[str] = None # Templates used to generate this protein (prediction-only) UpperCamelCase_ :Optional[Sequence[str]] = None # Chain corresponding to each parent UpperCamelCase_ :Optional[Sequence[int]] = None def lowerCAmelCase_ (lowercase__ : str ) -> Protein: '''simple docstring''' lowerCAmelCase__ = r'''(\[[A-Z]+\]\n)''' lowerCAmelCase__ = [tag.strip() for tag in re.split(lowercase__ , lowercase__ ) if len(lowercase__ ) > 0] lowerCAmelCase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) lowerCAmelCase__ = ["N", "CA", "C"] lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None for g in groups: if "[PRIMARY]" == g[0]: lowerCAmelCase__ = g[1][0].strip() for i in range(len(lowercase__ ) ): if seq[i] not in residue_constants.restypes: lowerCAmelCase__ = '''X''' # FIXME: strings are immutable lowerCAmelCase__ = np.array( [residue_constants.restype_order.get(lowercase__ , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowerCAmelCase__ = [] for axis in range(3 ): tertiary.append(list(map(lowercase__ , g[1][axis].split() ) ) ) lowerCAmelCase__ = np.array(lowercase__ ) lowerCAmelCase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(lowercase__ ): lowerCAmelCase__ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowerCAmelCase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) lowerCAmelCase__ = np.zeros( ( len(lowercase__ ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(lowercase__ ): lowerCAmelCase__ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=lowercase__ , atom_mask=lowercase__ , aatype=lowercase__ , residue_index=np.arange(len(lowercase__ ) ) , b_factors=lowercase__ , ) def lowerCAmelCase_ (lowercase__ : Protein , lowercase__ : int = 0 ) -> List[str]: '''simple docstring''' lowerCAmelCase__ = [] lowerCAmelCase__ = prot.remark if remark is not None: pdb_headers.append(f'REMARK {remark}' ) lowerCAmelCase__ = prot.parents lowerCAmelCase__ = prot.parents_chain_index if parents is not None and 
parents_chain_index is not None: lowerCAmelCase__ = [p for i, p in zip(lowercase__ , lowercase__ ) if i == chain_id] if parents is None or len(lowercase__ ) == 0: lowerCAmelCase__ = ['''N/A'''] pdb_headers.append(f'PARENT {" ".join(lowercase__ )}' ) return pdb_headers def lowerCAmelCase_ (lowercase__ : Protein , lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase__ = [] lowerCAmelCase__ = pdb_str.split('''\n''' ) lowerCAmelCase__ = prot.remark if remark is not None: out_pdb_lines.append(f'REMARK {remark}' ) lowerCAmelCase__ = 42 if prot.parents is not None and len(prot.parents ) > 0: lowerCAmelCase__ = [] if prot.parents_chain_index is not None: lowerCAmelCase__ = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(lowercase__ ) , [] ) parent_dict[str(lowercase__ )].append(lowercase__ ) lowerCAmelCase__ = max([int(lowercase__ ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowerCAmelCase__ = parent_dict.get(str(lowercase__ ) , ['''N/A'''] ) parents_per_chain.append(lowercase__ ) else: parents_per_chain.append(list(prot.parents ) ) else: lowerCAmelCase__ = [['''N/A''']] def make_parent_line(lowercase__ : Sequence[str] ) -> str: return f'PARENT {" ".join(lowercase__ )}' out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowerCAmelCase__ = 0 for i, l in enumerate(lowercase__ ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(lowercase__ ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(lowercase__ ): lowerCAmelCase__ = parents_per_chain[chain_counter] else: lowerCAmelCase__ = ['''N/A'''] out_pdb_lines.append(make_parent_line(lowercase__ ) ) return "\n".join(lowercase__ ) def lowerCAmelCase_ (lowercase__ : Protein ) -> str: '''simple docstring''' lowerCAmelCase__ = residue_constants.restypes + ['''X'''] def res_atoa(lowercase__ : int ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) lowerCAmelCase__ = residue_constants.atom_types lowerCAmelCase__ = [] lowerCAmelCase__ = prot.atom_mask lowerCAmelCase__ = prot.aatype lowerCAmelCase__ = prot.atom_positions lowerCAmelCase__ = prot.residue_index.astype(np.intaa ) lowerCAmelCase__ = prot.b_factors lowerCAmelCase__ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) lowerCAmelCase__ = get_pdb_headers(lowercase__ ) if len(lowercase__ ) > 0: pdb_lines.extend(lowercase__ ) lowerCAmelCase__ = aatype.shape[0] lowerCAmelCase__ = 1 lowerCAmelCase__ = 0 lowerCAmelCase__ = string.ascii_uppercase lowerCAmelCase__ = None # Add all atom sites. for i in range(lowercase__ ): lowerCAmelCase__ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(lowercase__ , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowerCAmelCase__ = '''ATOM''' lowerCAmelCase__ = atom_name if len(lowercase__ ) == 4 else f' {atom_name}' lowerCAmelCase__ = '''''' lowerCAmelCase__ = '''''' lowerCAmelCase__ = 1.00 lowerCAmelCase__ = atom_name[0] # Protein supports only C, N, O, S, this works. lowerCAmelCase__ = '''''' lowerCAmelCase__ = '''A''' if chain_index is not None: lowerCAmelCase__ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
lowerCAmelCase__ = ( f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}' f'{res_name_a:>3} {chain_tag:>1}' f'{residue_index[i]:>4}{insertion_code:>1} ' f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}' f'{occupancy:>6.2f}{b_factor:>6.2f} ' f'{element:>2}{charge:>2}' ) pdb_lines.append(lowercase__ ) atom_index += 1 lowerCAmelCase__ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowerCAmelCase__ = True lowerCAmelCase__ = chain_index[i + 1] if should_terminate: # Close the chain. lowerCAmelCase__ = '''TER''' lowerCAmelCase__ = ( f'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}' ) pdb_lines.append(lowercase__ ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(lowercase__ , lowercase__ ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(lowercase__ ) def lowerCAmelCase_ (lowercase__ : Protein ) -> np.ndarray: '''simple docstring''' return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def lowerCAmelCase_ (lowercase__ : FeatureDict , lowercase__ : ModelOutput , lowercase__ : Optional[np.ndarray] = None , lowercase__ : Optional[np.ndarray] = None , lowercase__ : Optional[str] = None , lowercase__ : Optional[Sequence[str]] = None , lowercase__ : Optional[Sequence[int]] = None , ) -> Protein: '''simple docstring''' return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=lowercase__ , remark=lowercase__ , parents=lowercase__ , parents_chain_index=lowercase__ , )
668
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
668
1
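# The `to_pdb`-style writer in the protein module above relies on fixed-width
# f-strings; every space is significant. A standalone check of that column
# layout (the atom values are made up, and the exact runs of pad spaces are an
# assumption reconstructed from the 80-column PDB ATOM record spec):
record_type, atom_index, name, alt_loc = "ATOM", 1, " CA ", ""
res_name, chain_tag, residue_index, insertion_code = "GLY", "A", 1, ""
x, y, z, occupancy, b_factor, element, charge = 11.104, 6.134, -6.504, 1.00, 0.0, "C", ""

atom_line = (
    f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
    f"{res_name:>3} {chain_tag:>1}"
    f"{residue_index:>4}{insertion_code:>1}   "
    f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"
    f"{occupancy:>6.2f}{b_factor:>6.2f}          "
    f"{element:>2}{charge:>2}"
)
print(atom_line)
print(len(atom_line))  # 80: PDB ATOM records are exactly 80 columns wide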
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = 'data2vec-text'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
721
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset a__: Any = random.Random() def UpperCamelCase__( UpperCamelCase__ : Dict , UpperCamelCase__ : str=1.0 , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple=None )->Any: if rng is None: A__ = global_rng A__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self,__lowerCamelCase,__lowerCamelCase=7,__lowerCamelCase=400,__lowerCamelCase=2000,__lowerCamelCase=2048,__lowerCamelCase=128,__lowerCamelCase=1,__lowerCamelCase=512,__lowerCamelCase=30,__lowerCamelCase=4_4100,): A__ = parent A__ = batch_size A__ = min_seq_length A__ = max_seq_length A__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) A__ = spectrogram_length A__ = feature_size A__ = num_audio_channels A__ = hop_length A__ = chunk_length A__ = sampling_rate def UpperCamelCase ( self ): return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def UpperCamelCase ( self,__lowerCamelCase=False,__lowerCamelCase=False ): def _flatten(__lowerCamelCase ): return list(itertools.chain(*__lowerCamelCase ) ) if equal_length: A__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size A__ = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length,self.max_seq_length,self.seq_length_diff ) ] if numpify: A__ = [np.asarray(__lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE = TvltFeatureExtractor def UpperCamelCase ( self ): A__ = TvltFeatureExtractionTester(self ) def UpperCamelCase ( self ): A__ = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(__lowerCamelCase,'''spectrogram_length''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''feature_size''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''num_audio_channels''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''hop_length''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''chunk_length''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''sampling_rate''' ) ) def UpperCamelCase ( self ): A__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A__ = feat_extract_first.save_pretrained(__lowerCamelCase )[0] check_json_file_has_correct_format(__lowerCamelCase ) A__ = self.feature_extraction_class.from_pretrained(__lowerCamelCase ) A__ = feat_extract_first.to_dict() A__ = feat_extract_second.to_dict() A__ = dict_first.pop('''mel_filters''' ) A__ = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) ) self.assertEqual(__lowerCamelCase,__lowerCamelCase ) def 
UpperCamelCase ( self ): A__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A__ = os.path.join(__lowerCamelCase,'''feat_extract.json''' ) feat_extract_first.to_json_file(__lowerCamelCase ) A__ = self.feature_extraction_class.from_json_file(__lowerCamelCase ) A__ = feat_extract_first.to_dict() A__ = feat_extract_second.to_dict() A__ = dict_first.pop('''mel_filters''' ) A__ = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) ) self.assertEqual(__lowerCamelCase,__lowerCamelCase ) def UpperCamelCase ( self ): # Initialize feature_extractor A__ = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 A__ = [floats_list((1, x) )[0] for x in range(800,1400,200 )] A__ = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input A__ = feature_extractor(np_speech_inputs[0],return_tensors='''np''',sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched A__ = feature_extractor(__lowerCamelCase,return_tensors='''np''',sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking A__ = feature_extractor( __lowerCamelCase,return_tensors='''np''',sampling_rate=4_4100,mask_audio=__lowerCamelCase ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. A__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] A__ = np.asarray(__lowerCamelCase ) A__ = feature_extractor(__lowerCamelCase,return_tensors='''np''',sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def UpperCamelCase ( self,__lowerCamelCase ): A__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''','''clean''',split='''validation''' ) # automatic decoding with librispeech A__ = ds.sort('''id''' ).select(range(__lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCamelCase ( self ): A__ = self._load_datasamples(1 ) A__ = TvltFeatureExtractor() A__ = feature_extractor(__lowerCamelCase,return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape,(1, 1, 192, 128) ) A__ = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2],__lowerCamelCase,atol=1E-4 ) )
212
0
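# Assuming `transformers` is installed, a quick smoke test of the
# `Data2VecTextConfig` defined above (the tiny sizes are arbitrary overrides,
# chosen only to keep the randomly initialized model small):
from transformers import Data2VecTextConfig, Data2VecTextModel

config = Data2VecTextConfig(
    vocab_size=128,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
)
model = Data2VecTextModel(config)
print(config.position_embedding_type)              # "absolute" (the default above)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the toy model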
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __SCREAMING_SNAKE_CASE ( nn.Module ): A : int A : int A : float = 0.0 A : int = 1 A : int = 1 A : bool = True A : bool = False A : bool = False A : bool = False A : jnp.dtype = jnp.floataa def __lowerCamelCase ( self ): lowercase : List[str] = [] lowercase : str = [] for i in range(self.num_layers ): lowercase : List[Any] = self.in_channels if i == 0 else self.out_channels lowercase : Union[str, Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) lowercase : str = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE__ ) lowercase : str = resnets lowercase : List[Any] = attentions if self.add_downsample: lowercase : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ): lowercase : Union[str, Any] = () for resnet, attn in zip(self.resnets , self.attentions ): lowercase : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = attn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) output_states += (hidden_states,) if self.add_downsample: lowercase : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE__ ) output_states += (hidden_states,) return hidden_states, output_states class __SCREAMING_SNAKE_CASE ( nn.Module ): A : int A : int A : float = 0.0 A : int = 1 A : bool = True A : jnp.dtype = jnp.floataa def __lowerCamelCase ( self ): lowercase : Union[str, Any] = [] for i in range(self.num_layers ): lowercase : Optional[int] = self.in_channels if i == 0 else self.out_channels lowercase : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) lowercase : int = resnets if self.add_downsample: lowercase : str = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ): lowercase : Tuple = () for resnet in self.resnets: lowercase : int = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) output_states += (hidden_states,) if self.add_downsample: lowercase : List[str] = self.downsamplers_a(SCREAMING_SNAKE_CASE__ ) output_states += (hidden_states,) return hidden_states, output_states class __SCREAMING_SNAKE_CASE ( nn.Module ): A : int A : int A : int A : float = 0.0 A : int = 1 A : int = 1 A : bool = True A : bool = False A : bool = False A : bool = False A : jnp.dtype = jnp.floataa def __lowerCamelCase ( self ): lowercase : Dict = [] lowercase : List[str] = [] for i in range(self.num_layers ): lowercase : List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels lowercase : str = self.prev_output_channel 
if i == 0 else self.out_channels lowercase : Optional[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) lowercase : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE__ ) lowercase : Any = resnets lowercase : Optional[Any] = attentions if self.add_upsample: lowercase : Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states lowercase : Optional[int] = res_hidden_states_tuple[-1] lowercase : Optional[Any] = res_hidden_states_tuple[:-1] lowercase : Dict = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowercase : int = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = attn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) if self.add_upsample: lowercase : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE__ ) return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): A : int A : int A : int A : float = 0.0 A : int = 1 A : bool = True A : jnp.dtype = jnp.floataa def __lowerCamelCase ( self ): lowercase : List[str] = [] for i in range(self.num_layers ): lowercase : List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels lowercase : Tuple = self.prev_output_channel if i == 0 else self.out_channels lowercase : Optional[int] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = resnets if self.add_upsample: lowercase : Dict = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ): for resnet in self.resnets: # pop res hidden states lowercase : Union[str, Any] = res_hidden_states_tuple[-1] lowercase : List[str] = res_hidden_states_tuple[:-1] lowercase : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowercase : Tuple = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) if self.add_upsample: lowercase : Optional[Any] = self.upsamplers_a(SCREAMING_SNAKE_CASE__ ) return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): A : int A : float = 0.0 A : int = 1 A : int = 1 A : bool = False A : bool = False A : jnp.dtype = jnp.floataa def __lowerCamelCase ( self ): # there is always at least one resnet lowercase : Tuple = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] lowercase : Union[str, Any] = [] for _ in range(self.num_layers ): lowercase : Dict = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , 
d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = resnets lowercase : str = attentions def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ): lowercase : List[str] = self.resnets[0](SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): lowercase : Optional[int] = attn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=SCREAMING_SNAKE_CASE__ ) return hidden_states
319
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = 'openai-gpt'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn='gelu',
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type='cls_index',
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
319
1
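# The Flax up-blocks above pop skip connections off `res_hidden_states_tuple`
# and concatenate them channel-wise before each resnet. A dependency-light
# numpy sketch of that bookkeeping (toy channels-last shapes, as in Flax):
import numpy as np

hidden = np.zeros((1, 8, 8, 4))                               # current feature map
res_stack = (np.zeros((1, 8, 8, 2)), np.zeros((1, 8, 8, 3)))  # pushed by the down blocks

for _ in range(len(res_stack)):
    res = res_stack[-1]        # pop the most recently stored skip connection...
    res_stack = res_stack[:-1]
    hidden = np.concatenate((hidden, res), axis=-1)  # ...and fuse it on the channel axis
    print(hidden.shape)
# (1, 8, 8, 7) then (1, 8, 8, 9): each up-step widens channels before its resnet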
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a : str = logging.get_logger(__name__) _a : List[Any] = '▁' _a : int = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', } _a : Dict = { 'vocab_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json' ), }, 'spm_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model' ) }, } _a : Any = { 'facebook/s2t-small-librispeech-asr': 10_24, } _a : Dict = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de'] _a : Any = {'mustc': MUSTC_LANGS} class UpperCamelCase_ ( __UpperCamelCase ): """simple docstring""" A = VOCAB_FILES_NAMES A = PRETRAINED_VOCAB_FILES_MAP A = MAX_MODEL_INPUT_SIZES A = ['''input_ids''', '''attention_mask'''] A = [] def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<unk>" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = None , **UpperCAmelCase , ): __lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , do_upper_case=UpperCAmelCase , do_lower_case=UpperCAmelCase , tgt_lang=UpperCAmelCase , lang_codes=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , ) __lowerCamelCase = do_upper_case __lowerCamelCase = do_lower_case __lowerCamelCase = load_json(UpperCAmelCase ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} __lowerCamelCase = spm_file __lowerCamelCase = load_spm(UpperCAmelCase , self.sp_model_kwargs ) if lang_codes is not None: __lowerCamelCase = lang_codes __lowerCamelCase = LANGUAGES[lang_codes] __lowerCamelCase = [f'''<lang:{lang}>''' for lang in self.langs] __lowerCamelCase = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs} __lowerCamelCase = self.lang_tokens __lowerCamelCase = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: __lowerCamelCase = {} @property def lowerCamelCase_ ( self ): return len(self.encoder ) @property def lowerCamelCase_ ( self ): return self._tgt_lang @tgt_lang.setter def lowerCamelCase_ ( self , UpperCAmelCase ): __lowerCamelCase = new_tgt_lang self.set_tgt_lang_special_tokens(UpperCAmelCase ) def lowerCamelCase_ ( self , UpperCAmelCase ): __lowerCamelCase = self.lang_code_to_id[tgt_lang] __lowerCamelCase = [lang_code_id] def lowerCamelCase_ ( self , UpperCAmelCase ): return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def lowerCamelCase_ ( self , UpperCAmelCase ): return self.encoder.get(UpperCAmelCase , self.encoder[self.unk_token] ) def lowerCamelCase_ ( self , UpperCAmelCase ): return self.decoder.get(UpperCAmelCase , self.unk_token ) def lowerCamelCase_ ( self , UpperCAmelCase ): __lowerCamelCase = [] __lowerCamelCase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: __lowerCamelCase = self.sp_model.decode(UpperCAmelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " 
__lowerCamelCase = [] else: current_sub_tokens.append(UpperCAmelCase ) __lowerCamelCase = self.sp_model.decode(UpperCAmelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase=None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) __lowerCamelCase = [1] * len(self.prefix_tokens ) __lowerCamelCase = [1] if token_ids_a is None: return prefix_ones + ([0] * len(UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(UpperCAmelCase )) + ([0] * len(UpperCAmelCase )) + suffix_ones def lowerCamelCase_ ( self ): __lowerCamelCase = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): __lowerCamelCase = self.__dict__.copy() __lowerCamelCase = None return state def __setstate__( self , UpperCAmelCase ): __lowerCamelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __lowerCamelCase = {} __lowerCamelCase = load_spm(self.spm_file , self.sp_model_kwargs ) def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ): __lowerCamelCase = Path(UpperCAmelCase ) assert save_dir.is_dir(), f'''{save_directory} should be a directory''' __lowerCamelCase = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) __lowerCamelCase = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , UpperCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , UpperCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(UpperCAmelCase , """wb""" ) as fi: __lowerCamelCase = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) return (str(UpperCAmelCase ), str(UpperCAmelCase )) def UpperCamelCase__ ( _A: str , _A: Dict[str, Any] ): '''simple docstring''' __lowerCamelCase = sentencepiece.SentencePieceProcessor(**_A ) spm.Load(str(_A ) ) return spm def UpperCamelCase__ ( _A: str ): '''simple docstring''' with open(_A , """r""" ) as f: return json.load(_A ) def UpperCamelCase__ ( _A: List[Any] , _A: str ): '''simple docstring''' with open(_A , """w""" ) as f: json.dump(_A , _A , indent=2 )
571
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) _a : str = logging.getLogger(__name__) _a : Optional[int] = 'Hello world! cécé herlolip' _a : List[str] = namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def UpperCamelCase__ ( _A: int , _A: List[str] ): '''simple docstring''' __lowerCamelCase = BertAbsConfig( temp_dir=""".""" , finetune_bert=_A , large=_A , share_emb=_A , use_bert_emb=_A , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , ) __lowerCamelCase = torch.load(_A , lambda _A , _A : storage ) __lowerCamelCase = AbsSummarizer(_A , torch.device("""cpu""" ) , _A ) original.eval() __lowerCamelCase = BertAbsSummarizer(_A , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) __lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs __lowerCamelCase = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_A )) ) __lowerCamelCase = torch.tensor(_A ).unsqueeze(0 ) __lowerCamelCase = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_A )) ) __lowerCamelCase = torch.tensor(_A ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass __lowerCamelCase = encoder_input_ids __lowerCamelCase = decoder_input_ids __lowerCamelCase = __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = __lowerCamelCase = None __lowerCamelCase = __lowerCamelCase = None __lowerCamelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical __lowerCamelCase = original(_A , _A , _A , _A , _A , _A , _A )[0] __lowerCamelCase = original.generator(_A ) __lowerCamelCase = new_model( _A , _A , _A , _A , _A )[0] __lowerCamelCase = new_model.generator(_A ) __lowerCamelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(_A ) ) __lowerCamelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(_A ) ) __lowerCamelCase = torch.allclose(_A , _A , atol=1e-3 ) if are_identical: logging.info("""all weights are equal up to 1e-3""" ) else: raise ValueError("""the weights are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": _a : Any = argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) _a : Any = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
571
1
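# `convert_tokens_to_string` in the Speech2Text tokenizer above buffers
# sentencepiece pieces and flushes the buffer whenever a special token shows
# up. A dependency-free sketch of that loop (toy pieces; "▁" marks word starts):
def toy_decode(pieces):
    # Stand-in for sp_model.decode: join pieces, turn "▁" back into spaces.
    return "".join(pieces).replace("▁", " ").strip()


special_tokens = {"</s>"}
tokens = ["▁hel", "lo", "▁wor", "ld", "</s>"]

out, buffer = "", []
for token in tokens:
    if token in special_tokens:
        out += toy_decode(buffer) + token + " "  # flush, keep the special token verbatim
        buffer = []
    else:
        buffer.append(token)
out += toy_decode(buffer)
print(out.strip())  # hello world</s>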
"""simple docstring""" import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = (EulerDiscreteScheduler,) _lowerCamelCase = 10 def UpperCAmelCase__ ( self , **_lowercase ) -> List[Any]: '''simple docstring''' snake_case_ : Any = { """num_train_timesteps""": 1_1_0_0, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_lowercase ) return config def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_lowercase ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_lowercase ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowercase ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ : str = self.scheduler_classes[0] snake_case_ : Any = self.get_scheduler_config() snake_case_ : int = scheduler_class(**_lowercase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case_ : Dict = torch.manual_seed(0 ) snake_case_ : int = self.dummy_model() snake_case_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case_ : Optional[Any] = sample.to(_lowercase ) for i, t in enumerate(scheduler.timesteps ): snake_case_ : Union[str, Any] = scheduler.scale_model_input(_lowercase , _lowercase ) snake_case_ : Tuple = model(_lowercase , _lowercase ) snake_case_ : Union[str, Any] = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ) snake_case_ : List[Any] = output.prev_sample snake_case_ : List[Any] = torch.sum(torch.abs(_lowercase ) ) snake_case_ : str = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[Any] = self.scheduler_classes[0] snake_case_ : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" ) snake_case_ : Optional[Any] = scheduler_class(**_lowercase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case_ : Optional[int] = torch.manual_seed(0 ) snake_case_ : Union[str, Any] = self.dummy_model() snake_case_ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case_ : Union[str, Any] = sample.to(_lowercase ) for i, t in enumerate(scheduler.timesteps ): snake_case_ : Dict = scheduler.scale_model_input(_lowercase , _lowercase ) snake_case_ : Any = model(_lowercase , _lowercase ) snake_case_ : Union[str, Any] = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ) snake_case_ : Optional[Any] = output.prev_sample snake_case_ : int = torch.sum(torch.abs(_lowercase ) ) snake_case_ : Tuple = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 0.0002 ) < 1E-2 assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3 def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[Any] = self.scheduler_classes[0] snake_case_ 
: Optional[int] = self.get_scheduler_config() snake_case_ : str = scheduler_class(**_lowercase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowercase ) snake_case_ : List[str] = torch.manual_seed(0 ) snake_case_ : Tuple = self.dummy_model() snake_case_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case_ : int = sample.to(_lowercase ) for t in scheduler.timesteps: snake_case_ : Union[str, Any] = scheduler.scale_model_input(_lowercase , _lowercase ) snake_case_ : Dict = model(_lowercase , _lowercase ) snake_case_ : Union[str, Any] = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ) snake_case_ : Union[str, Any] = output.prev_sample snake_case_ : Any = torch.sum(torch.abs(_lowercase ) ) snake_case_ : Tuple = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ : Optional[int] = self.scheduler_classes[0] snake_case_ : List[Any] = self.get_scheduler_config() snake_case_ : Tuple = scheduler_class(**_lowercase , use_karras_sigmas=_lowercase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowercase ) snake_case_ : List[str] = torch.manual_seed(0 ) snake_case_ : Union[str, Any] = self.dummy_model() snake_case_ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case_ : List[str] = sample.to(_lowercase ) for t in scheduler.timesteps: snake_case_ : Dict = scheduler.scale_model_input(_lowercase , _lowercase ) snake_case_ : str = model(_lowercase , _lowercase ) snake_case_ : int = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ) snake_case_ : Any = output.prev_sample snake_case_ : Dict = torch.sum(torch.abs(_lowercase ) ) snake_case_ : Union[str, Any] = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
58
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : def __init__( self : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : str=1_3 ,__SCREAMING_SNAKE_CASE : Optional[Any]=7 ,__SCREAMING_SNAKE_CASE : Optional[Any]=True ,__SCREAMING_SNAKE_CASE : List[str]=True ,__SCREAMING_SNAKE_CASE : int=True ,__SCREAMING_SNAKE_CASE : int=True ,__SCREAMING_SNAKE_CASE : Tuple=9_9 ,__SCREAMING_SNAKE_CASE : str=3_2 ,__SCREAMING_SNAKE_CASE : Any=2 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=4 ,__SCREAMING_SNAKE_CASE : Tuple=3_7 ,__SCREAMING_SNAKE_CASE : List[str]="gelu" ,__SCREAMING_SNAKE_CASE : List[Any]=0.1 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.1 ,__SCREAMING_SNAKE_CASE : Tuple=5_1_2 ,__SCREAMING_SNAKE_CASE : Dict=1_6 ,__SCREAMING_SNAKE_CASE : Tuple=2 ,__SCREAMING_SNAKE_CASE : List[str]=0.02 ,__SCREAMING_SNAKE_CASE : Optional[Any]=3 ,__SCREAMING_SNAKE_CASE : Dict=4 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=None ,__SCREAMING_SNAKE_CASE : Dict=1_0_0_0 ,): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope UpperCAmelCase = range_bbox def _UpperCAmelCase ( self : Dict ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) # convert bbox to numpy since TF does not support item assignment UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase = bbox[i, j, 3] UpperCAmelCase = bbox[i, j, 1] UpperCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase = bbox[i, j, 2] UpperCAmelCase = bbox[i, j, 0] UpperCAmelCase = t UpperCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase = 
ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase = LayoutLMConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : str ): UpperCAmelCase = TFLayoutLMModel(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : List[Any] ): UpperCAmelCase = TFLayoutLMForMaskedLM(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ): UpperCAmelCase = self.num_labels UpperCAmelCase = TFLayoutLMForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self : List[str] ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ): UpperCAmelCase = self.num_labels UpperCAmelCase = TFLayoutLMForTokenClassification(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self : Optional[int] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : str ): UpperCAmelCase = TFLayoutLMForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self : List[Any] ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( _a , _a , unittest.TestCase): _UpperCAmelCase : Optional[int] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _UpperCAmelCase : str = ( { 'feature-extraction': TFLayoutLMModel, 'fill-mask': TFLayoutLMForMaskedLM, 'text-classification': TFLayoutLMForSequenceClassification, 'token-classification': TFLayoutLMForTokenClassification, 'zero-shot': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _UpperCAmelCase : Tuple = False _UpperCAmelCase : int = True _UpperCAmelCase : Union[str, Any] = 10 def _UpperCAmelCase ( self : Tuple ): UpperCAmelCase = TFLayoutLMModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,hidden_size=3_7 ) def _UpperCAmelCase ( self : List[str] ): self.config_tester.run_common_tests() def _UpperCAmelCase ( self : List[str] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Dict ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Dict ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Any ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : List[str] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) @slow def _UpperCAmelCase ( self : List[str] ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFLayoutLMModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def _UpperCAmelCase ( self : List[str] ): pass def __UpperCamelCase ( ): """simple docstring""" UpperCAmelCase = 
tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231 UpperCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 UpperCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231 UpperCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) UpperCAmelCase = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class __magic_name__ ( unittest.TestCase): @slow def _UpperCAmelCase ( self : Any ): UpperCAmelCase = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase = model(input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) # test the sequence output on [0, :3, :3] UpperCAmelCase = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] ,) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) ) # test the pooled output on [1, :3] UpperCAmelCase = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) ) @slow def _UpperCAmelCase ( self : Union[str, Any] ): # initialize model with randomly initialized sequence classification head UpperCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" ,num_labels=2 ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase = model( input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=tf.convert_to_tensor([1, 1] ) ,) # test whether we get a loss as a scalar UpperCAmelCase = outputs.loss UpperCAmelCase = (2,) self.assertEqual(loss.shape ,__SCREAMING_SNAKE_CASE ) # test the shape of the logits UpperCAmelCase = outputs.logits UpperCAmelCase = (2, 2) self.assertEqual(logits.shape ,__SCREAMING_SNAKE_CASE ) @slow def _UpperCAmelCase ( self : Tuple ): # initialize model with randomly initialized token classification head UpperCAmelCase = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" ,num_labels=1_3 ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase = model( input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE ) # test the shape of the logits UpperCAmelCase = outputs.logits UpperCAmelCase = tf.convert_to_tensor((2, 2_5, 1_3) ) self.assertEqual(logits.shape ,__SCREAMING_SNAKE_CASE ) @slow def _UpperCAmelCase ( self : List[Any] ): # initialize model with randomly initialized token classification head UpperCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase = model(input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) # test the shape of the logits UpperCAmelCase = tf.convert_to_tensor((2, 2_5) ) self.assertEqual(outputs.start_logits.shape 
,__SCREAMING_SNAKE_CASE ) self.assertEqual(outputs.end_logits.shape ,__SCREAMING_SNAKE_CASE )
333
0
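A side note on the test fixture above: LayoutLM expects every token bounding box on a 0-1000 grid, which is why the coordinates in the bbox tensor top out at 1000. A minimal, hedged sketch of that normalization step (the helper name and example numbers follow the usual LayoutLM preprocessing convention and are not taken from this record):

def normalize_box(box: list[int], width: int, height: int) -> list[int]:
    # Rescale an absolute-pixel box [x0, y0, x1, y1] onto LayoutLM's 0-1000 grid.
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]


print(normalize_box([15, 30, 45, 60], width=300, height=600))  # [50, 50, 150, 100]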
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of a normal distribution with mean ``mu`` and standard deviation ``sigma``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
140
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class _UpperCAmelCase ( lowercase ): lowerCamelCase_ : Union[str, Any] = """xmod""" def __init__( self : Tuple , UpperCAmelCase : Tuple=3_05_22 , UpperCAmelCase : Any=7_68 , UpperCAmelCase : Any=12 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : List[str]=30_72 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Tuple=5_12 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Dict=1E-12 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Tuple="absolute" , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Any=None , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : int=False , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=("en_XX",) , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Union[str, Any] , ): super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase) SCREAMING_SNAKE_CASE_ :List[Any] = vocab_size SCREAMING_SNAKE_CASE_ :Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE_ :Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ :Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ :Any = hidden_act SCREAMING_SNAKE_CASE_ :Dict = intermediate_size SCREAMING_SNAKE_CASE_ :Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ :List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ :List[str] = max_position_embeddings SCREAMING_SNAKE_CASE_ :Tuple = type_vocab_size SCREAMING_SNAKE_CASE_ :List[str] = initializer_range SCREAMING_SNAKE_CASE_ :Any = layer_norm_eps SCREAMING_SNAKE_CASE_ :str = position_embedding_type SCREAMING_SNAKE_CASE_ :Any = use_cache SCREAMING_SNAKE_CASE_ :str = classifier_dropout SCREAMING_SNAKE_CASE_ :List[str] = pre_norm SCREAMING_SNAKE_CASE_ :List[str] = adapter_reduction_factor SCREAMING_SNAKE_CASE_ :int = adapter_layer_norm SCREAMING_SNAKE_CASE_ :Dict = adapter_reuse_layer_norm SCREAMING_SNAKE_CASE_ :Optional[Any] = ln_before_adapter SCREAMING_SNAKE_CASE_ :List[Any] = list(UpperCAmelCase) SCREAMING_SNAKE_CASE_ :List[Any] = default_language class _UpperCAmelCase ( lowercase ): @property 
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
140
1
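With the gaussian function above in scope, a few hedged sanity checks (the expected values are standard facts about the normal density, not taken from this record):

from math import isclose, pi, sqrt

assert isclose(gaussian(0.0), 1 / sqrt(2 * pi))  # peak of the standard normal, ~0.3989
assert isclose(gaussian(1.0), gaussian(-1.0))    # the density is symmetric about mu
assert gaussian(0.0, sigma=2.0) < gaussian(0.0)  # a larger sigma flattens the peak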
from __future__ import annotations


def binary_search(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Return the smallest index m in (l, r] with v[m] >= key (v must be sorted)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence of v, in O(n log n)."""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the first tail element that is >= v[i]
            tail[binary_search(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
38
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __UpperCAmelCase : str = get_logger(__name__) __UpperCAmelCase : Optional[Any] = Path(__file__).parent / 'model_card_template.md' __UpperCAmelCase : Tuple = uuida().hex __UpperCAmelCase : Optional[int] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES __UpperCAmelCase : List[str] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES __UpperCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def lowerCamelCase_ ( UpperCamelCase_ = None ): _a : str = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}""" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"""; torch/{_torch_version}""" if is_flax_available(): ua += f"""; jax/{_jax_version}""" ua += f"""; flax/{_flax_version}""" if is_onnx_available(): ua += f"""; onnxruntime/{_onnxruntime_version}""" # CI will set this value to True if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(UpperCamelCase_ , UpperCamelCase_ ): ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): ua += "; " + user_agent return ua def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None ): if token is None: _a : Optional[Any] = HfFolder.get_token() if organization is None: _a : Tuple = whoami(UpperCamelCase_ )['''name'''] return f"""{username}/{model_id}""" else: return f"""{organization}/{model_id}""" def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ): if not is_jinja_available(): raise ValueError( '''Modelcard rendering is based on Jinja templates.''' ''' Please make sure to have `jinja` installed before using `create_model_card`.''' ''' To install it, please run `pip install Jinja2`.''' ) if hasattr(UpperCamelCase_ , '''local_rank''' ) and args.local_rank not in [-1, 0]: return _a : List[str] = args.hub_token if hasattr(UpperCamelCase_ , '''hub_token''' ) else None _a : int = get_full_repo_name(UpperCamelCase_ , token=UpperCamelCase_ ) _a : Optional[int] = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=UpperCamelCase_ , model_name=UpperCamelCase_ , repo_name=UpperCamelCase_ , dataset_name=args.dataset_name if hasattr(UpperCamelCase_ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , 
gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(UpperCamelCase_ , '''gradient_accumulation_steps''' ) else None ) , adam_betaa=args.adam_betaa if hasattr(UpperCamelCase_ , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(UpperCamelCase_ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(UpperCamelCase_ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(UpperCamelCase_ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(UpperCamelCase_ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(UpperCamelCase_ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(UpperCamelCase_ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(UpperCamelCase_ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(UpperCamelCase_ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , ) _a : Dict = os.path.join(args.output_dir , '''README.md''' ) model_card.save(UpperCamelCase_ ) def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ = None ): if resolved_file is None or commit_hash is not None: return commit_hash _a : Union[str, Any] = str(Path(UpperCamelCase_ ).as_posix() ) _a : str = re.search(R'''snapshots/([^/]+)/''' , UpperCamelCase_ ) if search is None: return None _a : str = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(UpperCamelCase_ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. __UpperCAmelCase : Dict = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) __UpperCAmelCase : Optional[Any] = os.path.join(hf_cache_home, 'diffusers') def lowerCamelCase_ ( UpperCamelCase_ = None , UpperCamelCase_ = None ): if new_cache_dir is None: _a : Optional[Any] = DIFFUSERS_CACHE if old_cache_dir is None: _a : List[str] = old_diffusers_cache _a : Dict = Path(UpperCamelCase_ ).expanduser() _a : Union[str, Any] = Path(UpperCamelCase_ ).expanduser() for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): _a : str = new_cache_dir / old_blob_path.relative_to(UpperCamelCase_ ) new_blob_path.parent.mkdir(parents=UpperCamelCase_ , exist_ok=UpperCamelCase_ ) os.replace(UpperCamelCase_ , UpperCamelCase_ ) try: os.symlink(UpperCamelCase_ , UpperCamelCase_ ) except OSError: logger.warning( '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). 
__UpperCAmelCase : Any = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): __UpperCAmelCase : int = 0 else: with open(cache_version_file) as f: try: __UpperCAmelCase : Union[str, Any] = int(f.read()) except ValueError: __UpperCAmelCase : Optional[Any] = 0 if cache_version < 1: __UpperCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: __UpperCAmelCase : Optional[int] = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' 'the directory exists and can be written to.' ) def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ = None ): if variant is not None: _a : Dict = weights_name.split('''.''' ) _a : List[str] = splits[:-1] + [variant] + splits[-1:] _a : int = '''.'''.join(UpperCamelCase_ ) return weights_name def lowerCamelCase_ ( UpperCamelCase_ , *, UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , ): _a : int = str(UpperCamelCase_ ) if os.path.isfile(UpperCamelCase_ ): return pretrained_model_name_or_path elif os.path.isdir(UpperCamelCase_ ): if os.path.isfile(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) ): # Load from a PyTorch checkpoint _a : Tuple = os.path.join(UpperCamelCase_ , UpperCamelCase_ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ): _a : Tuple = os.path.join(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return model_file else: raise EnvironmentError( f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(UpperCamelCase_ ).base_version ) >= version.parse('''0.20.0''' ) ): try: _a : Optional[int] = hf_hub_download( UpperCamelCase_ , filename=_add_variant(UpperCamelCase_ , UpperCamelCase_ ) , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , local_files_only=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , user_agent=UpperCamelCase_ , subfolder=UpperCamelCase_ , revision=revision or commit_hash , ) warnings.warn( f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. 
Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , UpperCamelCase_ , ) return model_file except: # noqa: E722 warnings.warn( f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(UpperCamelCase_ , UpperCamelCase_ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(UpperCamelCase_ , UpperCamelCase_ )}' so that the correct variant file can be added.""" , UpperCamelCase_ , ) try: # 2. Load model file as usual _a : str = hf_hub_download( UpperCamelCase_ , filename=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , local_files_only=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , user_agent=UpperCamelCase_ , subfolder=UpperCamelCase_ , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """ '''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ''' '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ''' '''login`.''' ) except RevisionNotFoundError: raise EnvironmentError( f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """ '''this model name. Check the model page at ''' f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" ) except EntryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" ) except HTTPError as err: raise EnvironmentError( f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" ) except ValueError: raise EnvironmentError( f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it""" f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a""" f""" directory containing a file named {weights_name} or""" ''' \nCheckout your internet connection or see how to run the library in''' ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' ) except EnvironmentError: raise EnvironmentError( f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """ '''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ''' f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """ f"""containing a file named {weights_name}""" )
471
0
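With the functions above in scope, a short usage sketch for the O(n log n) LIS routine (the arrays are illustrative):

print(longest_increasing_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60]))  # 5, e.g. 10, 22, 33, 50, 60
print(longest_increasing_subsequence_length([5, 4, 3, 2, 1]))                  # 1: strictly decreasing input
print(longest_increasing_subsequence_length([]))                               # 0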
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __A = logging.get_logger(__name__) __A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED __A = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __A = { """allenai/led-base-16384""": 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def __A () ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowerCAmelCase__ :List[str] = bs[:] lowerCAmelCase__ :str = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase_ ) cs.append(2**8 + n ) n += 1 lowerCAmelCase__ :str = [chr(lowercase_ ) for n in cs] return dict(zip(lowercase_ , lowercase_ ) ) def __A (_SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" lowerCAmelCase__ :Dict = set() lowerCAmelCase__ :Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ :Dict = char return pairs class _lowerCAmelCase ( _UpperCamelCase ): """simple docstring""" __magic_name__ :Optional[int] = VOCAB_FILES_NAMES __magic_name__ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __magic_name__ :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ :Optional[int] = ["input_ids", "attention_mask"] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token lowerCAmelCase__ :List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token lowerCAmelCase__ :Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token lowerCAmelCase__ :str = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token lowerCAmelCase__ :Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token lowerCAmelCase__ :List[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase__ :int = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token super().__init__( errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , ) with open(__a , encoding='utf-8' ) as vocab_handle: lowerCAmelCase__ :int = json.load(__a ) lowerCAmelCase__ :str = {v: k for k, v in self.encoder.items()} lowerCAmelCase__ :int = errors # how to handle errors in decoding lowerCAmelCase__ :str = bytes_to_unicode() lowerCAmelCase__ :Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__a , encoding='utf-8' ) as merges_handle: lowerCAmelCase__ :List[Any] = merges_handle.read().split('\n' )[1:-1] lowerCAmelCase__ :Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase__ :List[Any] = dict(zip(__a , range(len(__a ) ) ) ) lowerCAmelCase__ :Any = {} lowerCAmelCase__ :List[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase__ :Optional[Any] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def snake_case ( self ): '''simple docstring''' return len(self.encoder ) def snake_case ( self ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCAmelCase__ :Optional[int] = tuple(__a ) lowerCAmelCase__ :List[Any] = get_pairs(__a ) if not pairs: return token while True: lowerCAmelCase__ :List[str] = min(__a , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__a , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ :Union[str, Any] = bigram lowerCAmelCase__ :Dict = [] lowerCAmelCase__ :Optional[Any] = 0 while i < len(__a ): try: lowerCAmelCase__ :Dict = word.index(__a , __a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ :str = j if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ :str = tuple(__a ) lowerCAmelCase__ :Optional[int] = new_word if len(__a ) == 1: break else: lowerCAmelCase__ :Any = get_pairs(__a ) lowerCAmelCase__ :str = " ".join(__a ) lowerCAmelCase__ :Dict = word return word def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = [] for token in re.findall(self.pat , __a ): lowerCAmelCase__ :List[str] = "".join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(' ' ) ) return bpe_tokens def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.encoder.get(__a , self.encoder.get(self.unk_token ) ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.decoder.get(__a ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = "".join(__a ) lowerCAmelCase__ :List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple 
docstring''' if not os.path.isdir(__a ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase__ :Tuple = os.path.join( __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase__ :Dict = os.path.join( __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(__a , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + '\n' ) lowerCAmelCase__ :str = 0 with open(__a , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." ' Please check that the tokenizer is not corrupted!' ) lowerCAmelCase__ :Union[str, Any] = token_index writer.write(' '.join(__a ) + '\n' ) index += 1 return vocab_file, merge_file def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase__ :Tuple = [self.cls_token_id] lowerCAmelCase__ :List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) if token_ids_a is None: return [1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [self.sep_token_id] lowerCAmelCase__ :List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()): lowerCAmelCase__ :List[Any] = " " + text return (text, kwargs) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , __UpperCAmelCase = None , __UpperCAmelCase = None , ): '''simple docstring''' lowerCAmelCase__ :Any = super()._pad( encoded_inputs=__a , max_length=__a , padding_strategy=__a , pad_to_multiple_of=__a , return_attention_mask=__a , ) # Load from model defaults if return_attention_mask is None: lowerCAmelCase__ :str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCAmelCase__ :Dict = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
lowerCAmelCase__ :str = len(encoded_inputs['global_attention_mask'] ) != len(__a ) if needs_to_be_padded: lowerCAmelCase__ :List[str] = len(__a ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCAmelCase__ :str = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCAmelCase__ :List[Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
707
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[Any] = AudioLDMPipeline __magic_name__ :Union[str, Any] = TEXT_TO_AUDIO_PARAMS __magic_name__ :Tuple = TEXT_TO_AUDIO_BATCH_PARAMS __magic_name__ :Dict = frozenset( [ """num_inference_steps""", """num_waveforms_per_prompt""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) def snake_case ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ :Union[str, Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=__UpperCAmelCase , ) lowerCAmelCase__ :Tuple = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) lowerCAmelCase__ :Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase__ :Dict = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , ) lowerCAmelCase__ :int = ClapTextModelWithProjection(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 ) lowerCAmelCase__ :Dict = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__UpperCAmelCase , ) lowerCAmelCase__ :str = SpeechTaHifiGan(__UpperCAmelCase ) lowerCAmelCase__ :str = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('mps' ): lowerCAmelCase__ :Tuple = torch.manual_seed(__UpperCAmelCase ) else: lowerCAmelCase__ :Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs 
def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Optional[int] = self.get_dummy_components() lowerCAmelCase__ :List[str] = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Any = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :int = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :int = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 2_5_6 lowerCAmelCase__ :int = audio[:1_0] lowerCAmelCase__ :Optional[Any] = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.get_dummy_components() lowerCAmelCase__ :int = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = audioldm_pipe.to(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = 3 * [inputs['prompt']] # forward lowerCAmelCase__ :Dict = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = output.audios[0] lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = 3 * [inputs.pop('prompt' )] lowerCAmelCase__ :Union[str, Any] = audioldm_pipe.tokenizer( __UpperCAmelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='pt' , ) lowerCAmelCase__ :Union[str, Any] = text_inputs['input_ids'].to(__UpperCAmelCase ) lowerCAmelCase__ :Dict = audioldm_pipe.text_encoder( __UpperCAmelCase , ) lowerCAmelCase__ :Any = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state lowerCAmelCase__ :List[Any] = F.normalize(__UpperCAmelCase , dim=-1 ) lowerCAmelCase__ :int = prompt_embeds # forward lowerCAmelCase__ :str = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.get_dummy_components() lowerCAmelCase__ :str = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = audioldm_pipe.to(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = 3 * ['this is a negative prompt'] lowerCAmelCase__ :str = negative_prompt lowerCAmelCase__ :List[Any] = 3 * [inputs['prompt']] # forward lowerCAmelCase__ :Any = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = output.audios[0] lowerCAmelCase__ :List[str] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = 3 * [inputs.pop('prompt' )] lowerCAmelCase__ :str = [] for p in [prompt, negative_prompt]: lowerCAmelCase__ :Optional[Any] = audioldm_pipe.tokenizer( __UpperCAmelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='pt' , ) lowerCAmelCase__ :List[Any] = 
text_inputs['input_ids'].to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = audioldm_pipe.text_encoder( __UpperCAmelCase , ) lowerCAmelCase__ :Tuple = text_embeds.text_embeds # additional L_2 normalization over each hidden-state lowerCAmelCase__ :Dict = F.normalize(__UpperCAmelCase , dim=-1 ) embeds.append(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ :Tuple = embeds # forward lowerCAmelCase__ :Dict = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Union[str, Any] = self.get_dummy_components() lowerCAmelCase__ :Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :str = 'egg cracking' lowerCAmelCase__ :Optional[int] = audioldm_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 2_5_6 lowerCAmelCase__ :List[Any] = audio[:1_0] lowerCAmelCase__ :Any = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Tuple = self.get_dummy_components() lowerCAmelCase__ :Optional[int] = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Tuple = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) lowerCAmelCase__ :Tuple = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 ).audios assert audios.shape == (1, 2_5_6) # test num_waveforms_per_prompt=1 (default) for batch of prompts lowerCAmelCase__ :str = 2 lowerCAmelCase__ :Dict = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_5_6) # test num_waveforms_per_prompt for single prompt lowerCAmelCase__ :Any = 2 lowerCAmelCase__ :Union[str, Any] = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios assert audios.shape == (num_waveforms_per_prompt, 2_5_6) # test num_waveforms_per_prompt for batch of prompts lowerCAmelCase__ :List[str] = 2 lowerCAmelCase__ :List[str] = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Dict = self.get_dummy_components() lowerCAmelCase__ :Dict = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :str = 
audioldm_pipe.vocoder.config.sampling_rate lowerCAmelCase__ :Tuple = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe(audio_length_in_s=0.0_16 , **__UpperCAmelCase ) lowerCAmelCase__ :int = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.0_16 lowerCAmelCase__ :List[Any] = audioldm_pipe(audio_length_in_s=0.0_32 , **__UpperCAmelCase ) lowerCAmelCase__ :str = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.0_32 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.get_dummy_components() lowerCAmelCase__ :Optional[int] = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :str = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = ['hey'] lowerCAmelCase__ :Any = audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 ) lowerCAmelCase__ :List[Any] = output.audios.shape assert audio_shape == (1, 2_5_6) lowerCAmelCase__ :List[Any] = audioldm_pipe.vocoder.config config.model_in_dim *= 2 lowerCAmelCase__ :Tuple = SpeechTaHifiGan(__UpperCAmelCase ).to(__UpperCAmelCase ) lowerCAmelCase__ :Any = audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 ) lowerCAmelCase__ :Any = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_5_6) def snake_case ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=__UpperCAmelCase ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def snake_case ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase ) @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ): '''simple docstring''' lowerCAmelCase__ :str = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 8, 1_2_8, 1_6) ) lowerCAmelCase__ :Any = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) lowerCAmelCase__ :List[str] = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) lowerCAmelCase__ :Optional[Any] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = self.get_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Dict = 2_5 lowerCAmelCase__ :List[Any] = audioldm_pipe(**__UpperCAmelCase ).audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 8_1_9_2_0 lowerCAmelCase__ :Optional[Any] = audio[7_7_2_3_0:7_7_2_4_0] lowerCAmelCase__ :Dict = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 
0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) lowerCAmelCase__ :int = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) lowerCAmelCase__ :int = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = self.get_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe(**__UpperCAmelCase ).audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 8_1_9_2_0 lowerCAmelCase__ :Tuple = audio[2_7_7_8_0:2_7_7_9_0] lowerCAmelCase__ :Union[str, Any] = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] ) lowerCAmelCase__ :Any = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
560
0
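The tokenizer record above relies on a small get_pairs helper to collect the adjacent symbol pairs that BPE considers for merging. A self-contained restatement with a usage example (the sample word is illustrative):

def get_pairs(word: tuple[str, ...]) -> set[tuple[str, str]]:
    # Collect the set of adjacent symbol pairs in a word, as BPE merge candidates.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


print(get_pairs(("h", "e", "l", "l", "o")))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}, in some set order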
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # secant step: follow the line through (x_n, f(x_n)) and (x_n1, f(x_n1))
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
15
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class SCREAMING_SNAKE_CASE_ ( snake_case , snake_case , unittest.TestCase ): __a : Tuple = IFPipeline __a : List[Any] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __a : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS __a : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''} def _snake_case ( self ) -> List[str]: '''simple docstring''' return self._get_dummy_components() def _snake_case ( self , lowercase , lowercase=0 ) -> int: '''simple docstring''' if str(lowercase ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(lowercase ) else: __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowercase ).manual_seed(lowercase ) __SCREAMING_SNAKE_CASE : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ) -> int: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def _snake_case ( self ) -> Tuple: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1e-1 ) def _snake_case ( self ) -> Dict: '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _snake_case ( self ) -> Any: '''simple docstring''' self._test_save_load_local() def _snake_case ( self ) -> Optional[int]: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def _snake_case ( self ) -> Optional[Any]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def _snake_case ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE : List[str] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Dict = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=lowercase , tokenizer=lowercase ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() __SCREAMING_SNAKE_CASE : str = None __SCREAMING_SNAKE_CASE : Tuple = None 
pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(lowercase , lowercase , lowercase , lowercase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img __SCREAMING_SNAKE_CASE : List[str] = IFImgaImgPipeline(**pipe_a.components ) __SCREAMING_SNAKE_CASE : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(lowercase , lowercase , lowercase , lowercase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting __SCREAMING_SNAKE_CASE : str = IFInpaintingPipeline(**pipe_a.components ) __SCREAMING_SNAKE_CASE : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(lowercase , lowercase , lowercase , lowercase ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Any: '''simple docstring''' _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[Any] = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , num_inference_steps=2 , generator=lowercase , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (6_4, 6_4, 3) __SCREAMING_SNAKE_CASE : str = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 __SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : Dict = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Dict = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) __SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 __SCREAMING_SNAKE_CASE : Any = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , num_inference_steps=2 , generator=lowercase , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) __SCREAMING_SNAKE_CASE : str = 
torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 __SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : Tuple = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , original_image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) __SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 __SCREAMING_SNAKE_CASE : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , mask_image=lowercase , num_inference_steps=2 , generator=lowercase , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) __SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 __SCREAMING_SNAKE_CASE : Any = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : Dict = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , mask_image=lowercase , original_image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) __SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 __SCREAMING_SNAKE_CASE : Dict = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) def A_ ( ) -> 
List[str]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
158
0
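With intersection and f defined as above, the driver converges to the real root of x**3 - 2*x - 5 = 0; the reference value ~2.0945515 is a well-known figure for this classic example, not taken from the record:

root = intersection(f, 3, 3.5)
print(root)          # ~2.0945514815
print(abs(f(root)))  # residual near 0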
def sum_digits(num: int) -> int:
    """Sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the ``max_n``-th convergent of the continued fraction for e (Project Euler 65)."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
587
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
587
1
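With solution defined as above, a hedged check against the worked example from Project Euler 65: the 10th convergent of e has numerator 1457, whose digit sum is 1 + 4 + 5 + 7 = 17.

print(solution(10))  # 17
print(solution())    # digit sum for the 100th convergent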
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : List[str] = logging.get_logger(__name__) _lowercase : Tuple = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class UpperCamelCase__( _lowerCAmelCase ): __magic_name__ : List[Any] = 'big_bird' def __init__( self : str , lowerCAmelCase : Any=50358 , lowerCAmelCase : Optional[int]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : Optional[Any]=3072 , lowerCAmelCase : List[str]="gelu_new" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : int=0.1 , lowerCAmelCase : Optional[int]=4096 , lowerCAmelCase : Any=2 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : List[Any]=1E-12 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Union[str, Any]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Optional[int]=66 , lowerCAmelCase : Dict="block_sparse" , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : str=False , lowerCAmelCase : str=64 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : List[Any] , )-> Union[str, Any]: """simple docstring""" super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , sep_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) UpperCAmelCase = vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = type_vocab_size UpperCAmelCase = layer_norm_eps UpperCAmelCase = use_cache UpperCAmelCase = rescale_embeddings UpperCAmelCase = attention_type UpperCAmelCase = use_bias UpperCAmelCase = block_size UpperCAmelCase = num_random_blocks UpperCAmelCase = classifier_dropout class UpperCamelCase__( _lowerCAmelCase ): @property def a__( self : List[str] )-> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
210
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
80
0
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
506
'''simple docstring'''


def optimal_merge_pattern(files: list) -> float:
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
506
1
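The merge-cost row above rescans the whole list for the minimum twice per merge, which is quadratic overall. A min-heap gives the same greedy result in O(n log n); this heapq variant is an added sketch, not part of the original row.

import heapq


def optimal_merge_cost(files: list) -> int:
    heapq.heapify(files)  # sorts the list into a min-heap in place
    total = 0
    while len(files) > 1:
        # Always merge the two currently smallest files.
        merged = heapq.heappop(files) + heapq.heappop(files)
        total += merged
        heapq.heappush(files, merged)
    return total


assert optimal_merge_cost([2, 3, 4]) == 14  # merge 2+3=5, then 5+4=9; cost 5+9=14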
"""simple docstring""" from collections.abc import Sequence from queue import Queue class lowerCAmelCase__ : def __init__( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : int=None , snake_case__ : str=None ): '''simple docstring''' UpperCAmelCase__ : Tuple = start UpperCAmelCase__ : str = end UpperCAmelCase__ : Optional[int] = val UpperCAmelCase__ : List[str] = (start + end) // 2 UpperCAmelCase__ : Tuple = left UpperCAmelCase__ : List[str] = right def __repr__( self : Tuple ): '''simple docstring''' return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})' class lowerCAmelCase__ : def __init__( self : List[Any] , snake_case__ : Sequence , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = collection UpperCAmelCase__ : Optional[int] = function if self.collection: UpperCAmelCase__ : Optional[int] = self._build_tree(0 , len(snake_case__ ) - 1 ) def __a ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' self._update_tree(self.root , snake_case__ , snake_case__ ) def __a ( self : Dict , snake_case__ : Tuple , snake_case__ : Optional[Any] ): '''simple docstring''' return self._query_range(self.root , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' if start == end: return SegmentTreeNode(snake_case__ , snake_case__ , self.collection[start] ) UpperCAmelCase__ : List[str] = (start + end) // 2 UpperCAmelCase__ : Optional[Any] = self._build_tree(snake_case__ , snake_case__ ) UpperCAmelCase__ : str = self._build_tree(mid + 1 , snake_case__ ) return SegmentTreeNode(snake_case__ , snake_case__ , self.fn(left.val , right.val ) , snake_case__ , snake_case__ ) def __a ( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ): '''simple docstring''' if node.start == i and node.end == i: UpperCAmelCase__ : int = val return if i <= node.mid: self._update_tree(node.left , snake_case__ , snake_case__ ) else: self._update_tree(node.right , snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[int] = self.fn(node.left.val , node.right.val ) def __a ( self : Tuple , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Optional[int] ): '''simple docstring''' if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , snake_case__ , snake_case__ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , snake_case__ , node.mid ) , self._query_range(node.right , node.mid + 1 , snake_case__ ) , ) else: # range in right child tree return self._query_range(node.right , snake_case__ , snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' if self.root is not None: UpperCAmelCase__ : Optional[int] = Queue() queue.put(self.root ) while not queue.empty(): UpperCAmelCase__ : str = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print("""*""" * 50) _lowerCAmelCase : Dict = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) 
# 5 print(arr.query_range(1, 3)) # 13 print()
438
"""simple docstring""" import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Optional[int] ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) UpperCAmelCase__ : Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) UpperCAmelCase__ : str = "xvjiarui/stable-diffusion-2-inpainting" UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case__ , safety_checker=snake_case__ ) UpperCAmelCase__ : List[str] = "Face of a yellow cat, high resolution, sitting on a park bench" UpperCAmelCase__ : List[str] = jax.random.PRNGKey(0 ) UpperCAmelCase__ : Optional[int] = 5_0 UpperCAmelCase__ : List[Any] = jax.device_count() UpperCAmelCase__ : int = num_samples * [prompt] UpperCAmelCase__ : str = num_samples * [init_image] UpperCAmelCase__ : Optional[Any] = num_samples * [mask_image] UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = pipeline.prepare_inputs(snake_case__ , snake_case__ , snake_case__ ) # shard inputs and rng UpperCAmelCase__ : Dict = replicate(snake_case__ ) UpperCAmelCase__ : Dict = jax.random.split(snake_case__ , jax.device_count() ) UpperCAmelCase__ : List[str] = shard(snake_case__ ) UpperCAmelCase__ : str = shard(snake_case__ ) UpperCAmelCase__ : Optional[int] = shard(snake_case__ ) UpperCAmelCase__ : Dict = pipeline( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ ) UpperCAmelCase__ : List[str] = output.images.reshape(snake_case__ , 5_1_2 , 5_1_2 , 3 ) UpperCAmelCase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] UpperCAmelCase__ : str = jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase__ : int = jnp.array( [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
438
1
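The segment tree in the record above is node-based. For comparison, the same point updates and range queries can be served by a compact array-based (iterative) tree; this alternative formulation is an added sketch using the same [2, 1, 5, 3, 4] demo data, not part of the original row.

class ArraySegmentTree:
    def __init__(self, data, fn):
        self.n = len(data)
        self.fn = fn
        # Leaves live at positions n..2n-1; internal nodes are built bottom-up.
        self.tree = [0] * self.n + list(data)
        for i in range(self.n - 1, 0, -1):
            self.tree[i] = fn(self.tree[2 * i], self.tree[2 * i + 1])

    def update(self, i, value):
        i += self.n
        self.tree[i] = value
        while i > 1:  # walk back up to the root, refreshing parents
            i //= 2
            self.tree[i] = self.fn(self.tree[2 * i], self.tree[2 * i + 1])

    def query(self, left, right):  # inclusive range [left, right]
        result = None
        left += self.n
        right += self.n + 1  # convert to half-open [left, right)
        while left < right:
            if left % 2:
                result = self.tree[left] if result is None else self.fn(result, self.tree[left])
                left += 1
            if right % 2:
                right -= 1
                result = self.tree[right] if result is None else self.fn(result, self.tree[right])
            left //= 2
            right //= 2
        return result


tree = ArraySegmentTree([2, 1, 5, 3, 4], lambda a, b: a + b)
tree.update(1, 5)
assert tree.query(1, 3) == 13  # matches the node-based demo above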
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
596
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
596
1
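The squeezebert row above builds an _import_structure dict that transformers' _LazyModule consumes so heavy submodules are only imported on first attribute access. A generic sketch of the same idea with just the standard library follows; all module and attribute names here are illustrative, and this is not the actual _LazyModule implementation.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the submodule only when the attribute is first requested.
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value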
'''simple docstring'''
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
98
"""simple docstring""" import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __SCREAMING_SNAKE_CASE = datasets.utils.logging.get_logger(__name__) @dataclass class a__ ( datasets.BuilderConfig ): UpperCAmelCase__ = None UpperCAmelCase__ = "utf-8" UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = True # deprecated UpperCAmelCase__ = None # deprecated UpperCAmelCase__ = 10 << 20 # 10MB UpperCAmelCase__ = None class a__ ( datasets.ArrowBasedBuilder ): UpperCAmelCase__ = JsonConfig def lowerCamelCase_ ( self :List[str] ): '''simple docstring''' if self.config.block_size is not None: logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' ) UpperCamelCase_ : List[Any] =self.config.block_size if self.config.use_threads is not True: logger.warning( 'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' ) if self.config.newlines_in_values is not None: raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' ) return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :List[str] ): '''simple docstring''' if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) UpperCamelCase_ : int =dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): UpperCamelCase_ : Tuple =data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCamelCase_ : Union[str, Any] =[files] UpperCamelCase_ : List[str] =[dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] UpperCamelCase_ : Optional[Any] =[] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCamelCase_ : Tuple =[files] UpperCamelCase_ : Optional[Any] =[dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'files': files} ) ) return splits def lowerCamelCase_ ( self :int , _lowerCamelCase :pa.Table ): '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): UpperCamelCase_ : Dict =self.config.features.arrow_schema.field(_lowerCamelCase ).type UpperCamelCase_ : Optional[Any] =pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example UpperCamelCase_ : int =table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def lowerCamelCase_ ( self :Dict , _lowerCamelCase :Optional[int] ): '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: UpperCamelCase_ : List[str] =json.load(_lowerCamelCase ) # We keep only the field we are interested in 
UpperCamelCase_ : Tuple =dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , (list, tuple) ): UpperCamelCase_ : Tuple =set().union(*[row.keys() for row in dataset] ) UpperCamelCase_ : str ={col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: UpperCamelCase_ : Any =dataset UpperCamelCase_ : List[Any] =pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , 'rb' ) as f: UpperCamelCase_ : Any =0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small UpperCamelCase_ : List[str] =max(self.config.chunksize // 32 , 16 << 10 ) UpperCamelCase_ : Dict =( self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' ) while True: UpperCamelCase_ : Optional[Any] =f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": UpperCamelCase_ : List[Any] =batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('utf-8' ) try: while True: try: UpperCamelCase_ : str =paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'''Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: UpperCamelCase_ : List[Any] =json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'''Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}''' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: UpperCamelCase_ : str =set().union(*[row.keys() for row in dataset] ) UpperCamelCase_ : List[Any] ={col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} UpperCamelCase_ : Optional[Any] =pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'''Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}''' ) raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'''Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}''' ) raise ValueError( f'''Not able to read records in the JSON file at {file}. ''' f'''You should probably indicate the field of the JSON file containing your records. ''' f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ''' f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. 
''' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
357
0
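The JSON builder row above parses files in chunks with pyarrow.json.read_json and a tunable block_size. A minimal, self-contained sketch of that call on an in-memory JSON-Lines buffer (the sample data is illustrative):

import io

import pyarrow.json as paj

jsonl_bytes = b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n'
table = paj.read_json(
    io.BytesIO(jsonl_bytes),
    read_options=paj.ReadOptions(block_size=16 << 10),  # parse in 16 kB blocks
)
assert table.num_rows == 2 and table.column_names == ["a", "b"]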
'''simple docstring'''
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    row = len(possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
394
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
394
1
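The n-queens row above collects the full board layouts. A condensed count-only variant using the same vertical and diagonal collision checks, verified against the known solution counts for n = 1..8, is added below as an illustrative sketch.

def count_n_queens(n: int) -> int:
    def dfs(board: list, right: set, left: set) -> int:
        row = len(board)
        if row == n:
            return 1  # every row holds exactly one queen
        return sum(
            dfs(board + [col], right | {row - col}, left | {row + col})
            for col in range(n)
            if col not in board and row - col not in right and row + col not in left
        )

    return dfs([], set(), set())


assert [count_n_queens(n) for n in range(1, 9)] == [1, 0, 0, 2, 10, 4, 40, 92]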
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
318
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : List[Any] , **UpperCAmelCase_ : Any) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") requires_backends(self , "torch") if self.framework != "pt": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") self.check_model_type(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[int] ={} lowerCamelCase__: Tuple ={} lowerCamelCase__: str ={} # preprocess args if "points_per_batch" in kwargs: lowerCamelCase__: Optional[Any] =kwargs["points_per_batch"] if "points_per_crop" in kwargs: lowerCamelCase__: int =kwargs["points_per_crop"] if "crops_n_layers" in kwargs: lowerCamelCase__: Any =kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: lowerCamelCase__: Tuple =kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: lowerCamelCase__: List[Any] =kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: lowerCamelCase__: List[str] =kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: lowerCamelCase__: int =kwargs["stability_score_offset"] if "mask_threshold" in kwargs: lowerCamelCase__: Optional[int] =kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: lowerCamelCase__: str =kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: lowerCamelCase__: Any =kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: lowerCamelCase__: List[Any] =kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: lowerCamelCase__: List[str] =kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self : int , UpperCAmelCase_ : Dict , *UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[Any]: '''simple docstring''' return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : float = 512 / 1_500 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 1 , ) ->Dict: '''simple docstring''' lowerCamelCase__: Dict =load_image(UpperCAmelCase_) lowerCamelCase__: List[str] =self.image_processor.size["longest_edge"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.image_processor.generate_crop_boxes( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: str =self.image_processor(images=UpperCAmelCase_ , return_tensors="pt") with self.device_placement(): if self.framework == "pt": lowerCamelCase__: str =self.get_inference_context() with inference_context(): lowerCamelCase__: Union[str, Any] =self._ensure_tensor_on_device(UpperCAmelCase_ , 
device=self.device) lowerCamelCase__: Optional[Any] =self.model.get_image_embeddings(model_inputs.pop("pixel_values")) lowerCamelCase__: str =image_embeddings lowerCamelCase__: int =grid_points.shape[1] lowerCamelCase__: int =points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None") for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: int =grid_points[:, i : i + points_per_batch, :, :] lowerCamelCase__: Optional[Any] =input_labels[:, i : i + points_per_batch] lowerCamelCase__: Dict =i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=0.88 , UpperCAmelCase_ : Optional[Any]=0.95 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : Any=1 , ) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =model_inputs.pop("input_boxes") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: int =model_inputs.pop("original_sizes").tolist() lowerCamelCase__: Union[str, Any] =model_inputs.pop("reshaped_input_sizes").tolist() lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCamelCase__: Optional[int] =model_outputs["pred_masks"] lowerCamelCase__: Union[str, Any] =self.image_processor.post_process_masks( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =model_outputs["iou_scores"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Optional[int]=0.7 , ) ->Tuple: '''simple docstring''' lowerCamelCase__: Any =[] lowerCamelCase__: Optional[int] =[] lowerCamelCase__: List[str] =[] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores")) all_masks.extend(model_output.pop("masks")) all_boxes.append(model_output.pop("boxes")) lowerCamelCase__: str =torch.cat(UpperCAmelCase_) lowerCamelCase__: List[str] =torch.cat(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =self.image_processor.post_process_for_mask_generation( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[str] =defaultdict(UpperCAmelCase_) for output in model_outputs: for k, v in output.items(): extra[k].append(UpperCAmelCase_) lowerCamelCase__: Any ={} if output_rle_mask: lowerCamelCase__: Union[str, Any] =rle_mask if output_bboxes_mask: lowerCamelCase__: int =bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
59
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
704
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class _UpperCamelCase( __lowerCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = XLNetTokenizer __SCREAMING_SNAKE_CASE : str = XLNetTokenizerFast __SCREAMING_SNAKE_CASE : Tuple = True __SCREAMING_SNAKE_CASE : Tuple = True def __lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __a : str = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : Tuple = '<s>' __a : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' __a : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<eod>' ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1_0_0_6 ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def __lowerCAmelCase ( self : str ): '''simple docstring''' __a : Any = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) __a : Dict = tokenizer.tokenize('This is a test' ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ) __a : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) __a : Optional[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ) __a : str = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def __lowerCAmelCase ( self : Any ): '''simple docstring''' __a : Union[str, Any] = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ ) __a : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + '', 'i', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : Any = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ ) __a : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) @slow def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' __a : str = XLNetTokenizer.from_pretrained('xlnet-base-cased' ) __a : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __a : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __a : int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ) __a : Optional[int] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' __a : str = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
577
0
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal a__ : Any = datasets.utils.logging.get_logger(__name__) a__ : str = ['names', 'prefix'] a__ : Any = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] a__ : Dict = ['encoding_errors', 'on_bad_lines'] a__ : Optional[Any] = ['date_format'] @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase ="," _lowerCamelCase =None _lowerCamelCase ="infer" _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =True _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =False _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =True _lowerCamelCase =True _lowerCamelCase =False _lowerCamelCase =True _lowerCamelCase =None _lowerCamelCase ="." _lowerCamelCase =None _lowerCamelCase ='"' _lowerCamelCase =0 _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =None _lowerCamelCase =True _lowerCamelCase =True _lowerCamelCase =0 _lowerCamelCase =True _lowerCamelCase =False _lowerCamelCase =None _lowerCamelCase =1_00_00 _lowerCamelCase =None _lowerCamelCase ="strict" _lowerCamelCase ="error" _lowerCamelCase =None def __snake_case ( self : Any ): if self.delimiter is not None: UpperCAmelCase = self.delimiter if self.column_names is not None: UpperCAmelCase = self.column_names @property def __snake_case ( self : Optional[Any] ): UpperCAmelCase = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , a__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # 
Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase =CsvConfig def __snake_case ( self : Optional[int] ): return datasets.DatasetInfo(features=self.config.features ) def __snake_case ( self : int , a__ : List[str] ): if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" ) UpperCAmelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a__ , (str, list, tuple) ): UpperCAmelCase = data_files if isinstance(a__ , a__ ): UpperCAmelCase = [files] UpperCAmelCase = [dl_manager.iter_files(a__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] UpperCAmelCase = [] for split_name, files in data_files.items(): if isinstance(a__ , a__ ): UpperCAmelCase = [files] UpperCAmelCase = [dl_manager.iter_files(a__ ) for file in files] splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={'''files''': files} ) ) return splits def __snake_case ( self : Any , a__ : pa.Table ): if self.config.features is not None: UpperCAmelCase = self.config.features.arrow_schema if all(not require_storage_cast(a__ ) for feature in self.config.features.values() ): # cheaper cast UpperCAmelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=a__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example UpperCAmelCase = table_cast(a__ , a__ ) return pa_table def __snake_case ( self : Any , a__ : Tuple ): UpperCAmelCase = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str UpperCAmelCase = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(a__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ): UpperCAmelCase = pd.read_csv(a__ , iterator=a__ , dtype=a__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(a__ ): UpperCAmelCase = pa.Table.from_pandas(a__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(a__ ) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(a__ )}: {e}" ) raise
51
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL a__ : Tuple = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(SCREAMING_SNAKE_CASE_ ): return [[videos]] raise ValueError(f"Could not make batched video from {videos}" ) class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["pixel_values"] def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ): super().__init__(**a__ ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = offset UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ): UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) if "shortest_edge" in size: UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ ) elif "height" in size and "width" in size: UpperCAmelCase = (size['''height'''], size['''width''']) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" ) return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): UpperCAmelCase = get_size_dict(a__ ) if "height" not in size or "width" not in size: raise ValueError(f"Size must have 'height' and 'width' as keys. 
Got {size.keys()}" ) return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ ) def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ): UpperCAmelCase = image.astype(np.floataa ) if offset: UpperCAmelCase = image - (scale / 2) return rescale(a__ , scale=a__ , data_format=a__ , **a__ ) def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase = to_numpy_array(a__ ) if do_resize: UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ ) if do_center_crop: UpperCAmelCase = self.center_crop(a__ , size=a__ ) if do_rescale: UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ ) if do_normalize: UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ ) UpperCAmelCase = to_channel_dimension_format(a__ , a__ ) return image def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = offset if offset is not None else self.offset UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' ) if not valid_images(a__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) UpperCAmelCase = make_batched(a__ ) UpperCAmelCase = [ [ self._preprocess_image( image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , ) for img in video ] for video in videos ] UpperCAmelCase = {'''pixel_values''': videos} return BatchFeature(data=a__ , tensor_type=a__ )
51
1
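The CSV builder row above streams each file through pandas in chunks and converts every chunk to an Arrow table before yielding it. A condensed sketch of that inner loop follows; the file name is illustrative.

import pandas as pd
import pyarrow as pa

# Read the CSV in 10,000-row chunks instead of loading it all at once.
for batch_idx, df in enumerate(pd.read_csv("data.csv", iterator=True, chunksize=10_000)):
    pa_table = pa.Table.from_pandas(df)
    # ... yield (batch_idx, pa_table) to the Arrow writer ...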
'''simple docstring'''
import unittest

from knapsack import knapsack as k


class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
711
'''simple docstring'''

from random import randint
from tempfile import TemporaryFile

import numpy as np


def __snake_case ( _UpperCAmelCase : Optional[int], _UpperCAmelCase : Tuple, _UpperCAmelCase : Any):
    UpperCamelCase = 0
    if start < end:
        UpperCamelCase = randint(_UpperCAmelCase, _UpperCAmelCase)
        UpperCamelCase = a[end]
        UpperCamelCase = a[pivot]
        UpperCamelCase = temp

        UpperCamelCase , UpperCamelCase = _in_place_partition(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)
        count += _in_place_quick_sort(_UpperCAmelCase, _UpperCAmelCase, p - 1)
        count += _in_place_quick_sort(_UpperCAmelCase, p + 1, _UpperCAmelCase)
    return count


def __snake_case ( _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any):
    UpperCamelCase = 0
    UpperCamelCase = randint(_UpperCAmelCase, _UpperCAmelCase)
    UpperCamelCase = a[end]
    UpperCamelCase = a[pivot]
    UpperCamelCase = temp
    UpperCamelCase = start - 1
    for index in range(_UpperCAmelCase, _UpperCAmelCase):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            UpperCamelCase = new_pivot_index + 1
            UpperCamelCase = a[new_pivot_index]
            UpperCamelCase = a[index]
            UpperCamelCase = temp

    UpperCamelCase = a[new_pivot_index + 1]
    UpperCamelCase = a[end]
    UpperCamelCase = temp
    return new_pivot_index + 1, count


snake_case_ : List[Any] = TemporaryFile()
snake_case_ : Optional[Any] = 100  # 1000 elements are to be sorted


snake_case_ , snake_case_ : List[str] = 0, 1  # mean and standard deviation
snake_case_ : Any = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)


outfile.seek(0)  # using the same array
snake_case_ : Optional[int] = np.load(outfile)
snake_case_ : Optional[int] = len(M) - 1
snake_case_ : List[str] = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    'is :'
)
print(z)
350
0
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ = 10 , lowerCamelCase__ = 22 ): """simple docstring""" lowerCAmelCase__ = range(1 , lowerCamelCase__ ) lowerCAmelCase__ = range(1 , lowerCamelCase__ ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"{solution(10, 22) = }")
644
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __lowerCAmelCase : Dict = "CompVis/stable-diffusion-v1-1" __lowerCAmelCase : int = "CompVis/stable-diffusion-v1-2" __lowerCAmelCase : int = "CompVis/stable-diffusion-v1-3" __lowerCAmelCase : Union[str, Any] = "CompVis/stable-diffusion-v1-4" class a_ ( __UpperCamelCase ): def __init__( self : List[Any] , snake_case__ : AutoencoderKL , snake_case__ : CLIPTextModel , snake_case__ : CLIPTokenizer , snake_case__ : UNetaDConditionModel , snake_case__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case__ : StableDiffusionSafetyChecker , snake_case__ : CLIPImageProcessor , snake_case__ : bool = True , ): super()._init_() lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(snake_case__ ) lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(snake_case__ ) lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(snake_case__ ) lowerCAmelCase__ = StableDiffusionPipeline( vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , requires_safety_checker=snake_case__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ): return {k: getattr(self , snake_case__ ) for k in self.config.keys() if not k.startswith("""_""" )} def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCAmelCase__ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): self.enable_attention_slicing(snake_case__ ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , **snake_case__ : List[Any] , ): return self.pipea( prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , 
snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , **snake_case__ : Tuple , ): return self.pipea( prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , **snake_case__ : Optional[Any] , ): return self.pipea( prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , **snake_case__ : str , ): return self.pipea( prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , 
**snake_case__ : Optional[Any] , ): lowerCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu""" self.to(snake_case__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 lowerCAmelCase__ = self.textaimg_sda_a( prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 lowerCAmelCase__ = self.textaimg_sda_a( prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 lowerCAmelCase__ = self.textaimg_sda_a( prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 lowerCAmelCase__ = self.textaimg_sda_a( prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
644
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowercase_ = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
704
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


lowercase_ = logging.get_logger(__name__)


def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[Any] ):
    '''simple docstring'''
    if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos

    elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]

    elif is_valid_image(__SCREAMING_SNAKE_CASE ):
        return [[videos]]

    raise ValueError(f'''Could not make batched video from {videos}''' )


class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    A : Any = ["pixel_values"]

    def __init__( self : Optional[int] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[int, float] = 1 / 2_55 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , **_lowerCAmelCase : Union[str, Any] , ):
        super().__init__(**_lowerCAmelCase )
        __snake_case : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
        __snake_case : List[Any] = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
        __snake_case : int = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        __snake_case : List[Any] = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" )

        __snake_case : Union[str, Any] = do_resize
        __snake_case : Optional[Any] = size
        __snake_case : int = do_center_crop
        __snake_case : Dict = crop_size
        __snake_case : Dict = resample
        __snake_case : Tuple = do_rescale
        __snake_case : Optional[int] = rescale_factor
        __snake_case : str = do_normalize
        __snake_case : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __snake_case : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def snake_case__ ( self : Optional[int] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Optional[Any] , ):
        __snake_case : List[str] = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
        if "shortest_edge" in size:
            __snake_case : Tuple = get_resize_output_image_size(_lowerCAmelCase , size["""shortest_edge"""] , default_to_square=_lowerCAmelCase )
        elif "height" in size and "width" in size:
            __snake_case : List[Any] = (size["""height"""], size["""width"""])
        else:
            raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )

    def snake_case__ ( self : str , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Optional[Any] , ):
        __snake_case : List[str] = get_size_dict(_lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(_lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )

    def snake_case__ ( self : Tuple , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[int, float] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Optional[Any] , ):
        return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )

    def snake_case__ ( self : str , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Dict , ):
        return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )

    def snake_case__ ( self : Tuple , _lowerCAmelCase : ImageInput , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : float = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )

        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        # All transformations expect numpy arrays.
        __snake_case : Tuple = to_numpy_array(_lowerCAmelCase )

        if do_resize:
            __snake_case : List[Any] = self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase )

        if do_center_crop:
            __snake_case : Dict = self.center_crop(_lowerCAmelCase , size=_lowerCAmelCase )

        if do_rescale:
            __snake_case : int = self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase )

        if do_normalize:
            __snake_case : List[Any] = self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase )

        __snake_case : Optional[Any] = to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase )
        return image

    def snake_case__ ( self : List[str] , _lowerCAmelCase : ImageInput , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : float = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase : Union[str, Any] , ):
        __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize
        __snake_case : Any = resample if resample is not None else self.resample
        __snake_case : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
        __snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
        __snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __snake_case : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
        __snake_case : List[str] = image_mean if image_mean is not None else self.image_mean
        __snake_case : List[str] = image_std if image_std is not None else self.image_std
        __snake_case : Optional[Any] = size if size is not None else self.size
        __snake_case : int = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
        __snake_case : Any = crop_size if crop_size is not None else self.crop_size
        __snake_case : List[Any] = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" )

        if not valid_images(_lowerCAmelCase ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray."""
            )

        __snake_case : Optional[Any] = make_batched(_lowerCAmelCase )

        __snake_case : int = [
            [
                self._preprocess_image(
                    image=_lowerCAmelCase , do_resize=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , do_center_crop=_lowerCAmelCase , crop_size=_lowerCAmelCase , do_rescale=_lowerCAmelCase , rescale_factor=_lowerCAmelCase , do_normalize=_lowerCAmelCase , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase , data_format=_lowerCAmelCase , )
                for img in video
            ]
            for video in videos
        ]

        __snake_case : Optional[int] = {"""pixel_values""": videos}
        return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
390
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCAmelCase : str = 16 _lowerCAmelCase : Dict = 32 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = 16 ) -> List[Any]: '''simple docstring''' _lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) _lowerCamelCase : List[Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(_lowerCamelCase ): # max_length=None => use the model max length (it's actually the default) _lowerCamelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowerCamelCase : Any = datasets.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCamelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_lowerCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowerCamelCase : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowerCamelCase : str = 16 elif accelerator.mixed_precision != "no": _lowerCamelCase : Union[str, Any] = 8 else: _lowerCamelCase : Optional[int] = None return tokenizer.pad( _lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , ) # Instantiate dataloaders. 
_lowerCamelCase : Any = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) _lowerCamelCase : List[str] = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCAmelCase : Dict = mocked_dataloaders # noqa: F811 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCamelCase ) == "1": _lowerCamelCase : List[str] = 2 # New Code # _lowerCamelCase : Any = int(args.gradient_accumulation_steps ) # Initialize accelerator _lowerCamelCase : Dict = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_lowerCamelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCamelCase : Tuple = config["lr"] _lowerCamelCase : int = int(config["num_epochs"] ) _lowerCamelCase : Any = int(config["seed"] ) _lowerCamelCase : Optional[int] = int(config["batch_size"] ) _lowerCamelCase : Optional[int] = evaluate.load("glue" , "mrpc" ) set_seed(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = get_dataloaders(_lowerCamelCase , _lowerCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCamelCase : Tuple = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCamelCase : List[str] = model.to(accelerator.device ) # Instantiate optimizer _lowerCamelCase : Any = AdamW(params=model.parameters() , lr=_lowerCamelCase ) # Instantiate scheduler _lowerCamelCase : List[Any] = get_linear_schedule_with_warmup( optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Now we train the model for epoch in range(_lowerCamelCase ): model.train() for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(_lowerCamelCase ): _lowerCamelCase : List[str] = model(**_lowerCamelCase ) _lowerCamelCase : Dict = output.loss accelerator.backward(_lowerCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowerCamelCase : str = model(**_lowerCamelCase ) _lowerCamelCase : Any = outputs.logits.argmax(dim=-1 ) _lowerCamelCase, _lowerCamelCase : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_lowerCamelCase , references=_lowerCamelCase , ) _lowerCamelCase : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _lowerCamelCase ) def lowerCamelCase_( ) -> Tuple: '''simple docstring''' _lowerCamelCase : List[str] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=_lowerCamelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) _lowerCamelCase : Any = parser.parse_args() _lowerCamelCase : Dict = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": main()
46
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class __magic_name__ ( snake_case , unittest.TestCase ):
    UpperCamelCase_ :List[Any] = LayoutLMTokenizer
    UpperCamelCase_ :Dict = LayoutLMTokenizerFast
    UpperCamelCase_ :List[str] = True
    UpperCamelCase_ :Dict = True

    def UpperCAmelCase_ ( self )-> str:
        super().setUp()

        UpperCamelCase_ = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def UpperCAmelCase_ ( self , **_lowercase )-> List[str]:
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )

    def UpperCAmelCase_ ( self , _lowercase )-> Union[str, Any]:
        UpperCamelCase_ = "UNwant\u00E9d,running"
        UpperCamelCase_ = "unwanted, running"
        return input_text, output_text

    def UpperCAmelCase_ ( self )-> Tuple:
        UpperCamelCase_ = self.tokenizer_class(self.vocab_file )
        UpperCamelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(_lowercase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )

    def UpperCAmelCase_ ( self )-> Dict:
        pass
628
0
'''simple docstring'''

import os
from pathlib import Path


def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    A_ = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    A_ = {
        '''wmt16-en-de-dist-12-1''': [28.3, 27.52],
        '''wmt16-en-de-dist-6-1''': [27.4, 27.11],
        '''wmt16-en-de-12-1''': [26.9, 25.75],
    }
    A_ = f"{src_lang}-{tgt_lang}"

    A_ = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
    A_ = os.path.join(_UpperCamelCase , '''README.md''' )

    print(f"Generating {path}" )
    with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
        f.write(_UpperCamelCase )


# make sure we are under the root of the project
__lowercase = Path(__file__).resolve().parent.parent.parent
__lowercase = repo_dir / """model_cards"""

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    __lowercase = model_cards_dir / """allenai""" / model_name
    write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
716
import datasets

from .evaluate import evaluate


__lowercase = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

__lowercase = """
This metric wrap the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal
contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for
when reviewing contracts in connection with corporate transactions.
"""

__lowercase = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric(\"cuad\")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):

    def UpperCamelCase ( self : List[Any] ) -> Tuple:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': {
                        '''id''': datasets.Value('''string''' ),
                        '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
                    },
                    '''references''': {
                        '''id''': datasets.Value('''string''' ),
                        '''answers''': datasets.features.Sequence(
                            {
                                '''text''': datasets.Value('''string''' ),
                                '''answer_start''': datasets.Value('''int32''' ),
                            }
                        ),
                    },
                }
            ) ,
            codebase_urls=['''https://www.atticusprojectai.org/cuad'''] ,
            reference_urls=['''https://www.atticusprojectai.org/cuad'''] ,
        )

    def UpperCamelCase ( self : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple ) -> Optional[Any]:
        """simple docstring"""
        A_ = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        A_ = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        A_ = evaluate(dataset=lowerCamelCase__ , predictions=lowerCamelCase__ )
        return score
563
0
'''simple docstring'''

import argparse
from collections import defaultdict

import yaml


_lowerCamelCase = """docs/source/en/_toctree.yml"""


def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
    """simple docstring"""
    UpperCAmelCase_ : Optional[int] = defaultdict(_SCREAMING_SNAKE_CASE )
    for doc in model_doc:
        counts[doc["local"]] += 1
    UpperCAmelCase_ : Optional[Any] = [key for key, value in counts.items() if value > 1]

    UpperCAmelCase_ : List[str] = []
    for duplicate_key in duplicates:
        UpperCAmelCase_ : List[str] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(_SCREAMING_SNAKE_CASE ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )

    # Sort
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : s["title"].lower() )


def a__ ( _SCREAMING_SNAKE_CASE : Tuple=False ) -> List[Any]:
    """simple docstring"""
    with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
        UpperCAmelCase_ : int = yaml.safe_load(f.read() )

    # Get to the API doc
    UpperCAmelCase_ : Optional[Any] = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    UpperCAmelCase_ : Tuple = content[api_idx]["sections"]

    # Then to the model doc
    UpperCAmelCase_ : List[Any] = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    UpperCAmelCase_ : List[str] = api_doc[model_idx]["sections"]

    UpperCAmelCase_ : List[str] = [(idx, section) for idx, section in enumerate(_SCREAMING_SNAKE_CASE ) if "sections" in section]
    UpperCAmelCase_ : Optional[Any] = False
    for idx, modality_doc in modalities_docs:
        UpperCAmelCase_ : Dict = modality_doc["sections"]
        UpperCAmelCase_ : Optional[int] = clean_model_doc_toc(_SCREAMING_SNAKE_CASE )

        if old_modality_doc != new_modality_doc:
            UpperCAmelCase_ : Union[str, Any] = True
            if overwrite:
                UpperCAmelCase_ : Dict = new_modality_doc

    if diff:
        if overwrite:
            UpperCAmelCase_ : List[str] = model_doc
            UpperCAmelCase_ : List[str] = api_doc
            with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(_SCREAMING_SNAKE_CASE , allow_unicode=_SCREAMING_SNAKE_CASE ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )


if __name__ == "__main__":
    _lowerCamelCase = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    _lowerCamelCase = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
71
'''simple docstring'''

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeqaSeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    SeqaSeqDataCollator,
    SeqaSeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


_lowerCamelCase = logging.getLogger(__name__)


@dataclass
class _snake_case :
    __A : str =field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    __A : Optional[str] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    __A : Optional[str] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    __A : Optional[str] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether tp freeze the encoder."})
    __A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class _snake_case :
    __A : str =field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    __A : Optional[str] =field(
        default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    __A : Optional[int] =field(
        default=10_24 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    __A : Optional[int] =field(
        default=1_28 , metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    __A : Optional[int] =field(
        default=1_42 , metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    __A : Optional[int] =field(
        default=1_42 , metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    __A : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."})
    __A : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."})
    __A : Optional[int] =field(default=-1 , metadata={"help": "# test examples. -1 means use all."})
    __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Source language id for translation."})
    __A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Target language id for translation."})
    __A : Optional[int] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "# num_beams to use for evaluation."})
    __A : bool =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )


def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
    """simple docstring"""
    logger.info(F'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(F''' {key} = {metrics[key]}''' )
    save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}_results.json''' ) )


def a__ ( ) -> Any:
    """simple docstring"""
    UpperCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )

    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()

    check_output_dir(_SCREAMING_SNAKE_CASE )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,
        datefmt="%m/%d/%Y %H:%M:%S" ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,
        training_args.fpaa ,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
    )

    UpperCAmelCase_ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

    UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
    )
    UpperCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=".ckpt" in model_args.model_name_or_path ,
        config=_SCREAMING_SNAKE_CASE ,
        cache_dir=model_args.cache_dir ,
    )

    # use task specific params
    use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        UpperCAmelCase_ : Dict = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    if model_args.freeze_embeds:
        freeze_embeds(_SCREAMING_SNAKE_CASE )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )

    UpperCAmelCase_ : Dict = SeqaSeqDataset

    # Get datasets
    UpperCAmelCase_ : Tuple = (
        dataset_class(
            _SCREAMING_SNAKE_CASE ,
            type_path="train" ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_train ,
            max_target_length=data_args.max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or "" ,
        )
        if training_args.do_train
        else None
    )
    UpperCAmelCase_ : Dict = (
        dataset_class(
            _SCREAMING_SNAKE_CASE ,
            type_path="val" ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_val ,
            max_target_length=data_args.val_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or "" ,
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    UpperCAmelCase_ : int = (
        dataset_class(
            _SCREAMING_SNAKE_CASE ,
            type_path="test" ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_test ,
            max_target_length=data_args.test_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or "" ,
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    UpperCAmelCase_ : Optional[Any] = (
        build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
    )
    UpperCAmelCase_ : List[str] = SeqaSeqTrainer(
        model=_SCREAMING_SNAKE_CASE ,
        args=_SCREAMING_SNAKE_CASE ,
        data_args=_SCREAMING_SNAKE_CASE ,
        train_dataset=_SCREAMING_SNAKE_CASE ,
        eval_dataset=_SCREAMING_SNAKE_CASE ,
        data_collator=SeqaSeqDataCollator(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) ,
        compute_metrics=_SCREAMING_SNAKE_CASE ,
        tokenizer=_SCREAMING_SNAKE_CASE ,
    )

    UpperCAmelCase_ : List[Any] = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***" )

        UpperCAmelCase_ : Any = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        UpperCAmelCase_ : int = train_result.metrics
        UpperCAmelCase_ : Dict = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
            all_metrics.update(_SCREAMING_SNAKE_CASE )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )

        UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" )
        UpperCAmelCase_ : Optional[Any] = data_args.n_val
        UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 )

        if trainer.is_world_process_zero():
            handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
            all_metrics.update(_SCREAMING_SNAKE_CASE )

    if training_args.do_predict:
        logger.info("*** Predict ***" )

        UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" )
        UpperCAmelCase_ : List[str] = test_output.metrics
        UpperCAmelCase_ : int = data_args.n_test

        if trainer.is_world_process_zero():
            UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
            all_metrics.update(_SCREAMING_SNAKE_CASE )

            if training_args.predict_with_generate:
                UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
                UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE )
                write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) )

    if trainer.is_world_process_zero():
        save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) )

    return all_metrics


def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
    """simple docstring"""
    main()


if __name__ == "__main__":
    main()
71
1
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class a_ ( _UpperCAmelCase ): a : jnp.ndarray @flax_register_to_config class a_ ( nn.Module , _UpperCAmelCase , _UpperCAmelCase ): a : int = 32 a : int = 4 a : int = 4 a : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) a : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") a : Union[bool, Tuple[bool]] = False a : Tuple[int] = (320, 640, 1280, 1280) a : int = 2 a : Union[int, Tuple[int]] = 8 a : Optional[Union[int, Tuple[int]]] = None a : int = 1280 a : float = 0.0 a : bool = False a : jnp.dtype = jnp.floataa a : bool = True a : int = 0 a : bool = False def _snake_case ( self : Tuple , __UpperCamelCase : jax.random.KeyArray ) ->FrozenDict: '''simple docstring''' _UpperCAmelCase = (1, self.in_channels, self.sample_size, self.sample_size) _UpperCAmelCase = jnp.zeros(__UpperCamelCase , dtype=jnp.floataa ) _UpperCAmelCase = jnp.ones((1,) , dtype=jnp.intaa ) _UpperCAmelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _UpperCAmelCase ,_UpperCAmelCase = jax.random.split(__UpperCamelCase ) _UpperCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )["params"] def _snake_case ( self : Union[str, Any] ) ->str: '''simple docstring''' _UpperCAmelCase = self.block_out_channels _UpperCAmelCase = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( """At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
_UpperCAmelCase = self.num_attention_heads or self.attention_head_dim # input _UpperCAmelCase = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _UpperCAmelCase = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _UpperCAmelCase = FlaxTimestepEmbedding(__UpperCamelCase , dtype=self.dtype ) _UpperCAmelCase = self.only_cross_attention if isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase = (num_attention_heads,) * len(self.down_block_types ) # down _UpperCAmelCase = [] _UpperCAmelCase = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _UpperCAmelCase = output_channel _UpperCAmelCase = block_out_channels[i] _UpperCAmelCase = i == len(__UpperCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": _UpperCAmelCase = FlaxCrossAttnDownBlockaD( in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCAmelCase = FlaxDownBlockaD( in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(__UpperCamelCase ) _UpperCAmelCase = down_blocks # mid _UpperCAmelCase = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up _UpperCAmelCase = [] _UpperCAmelCase = list(reversed(__UpperCamelCase ) ) _UpperCAmelCase = list(reversed(__UpperCamelCase ) ) _UpperCAmelCase = list(reversed(__UpperCamelCase ) ) _UpperCAmelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _UpperCAmelCase = output_channel _UpperCAmelCase = reversed_block_out_channels[i] _UpperCAmelCase = reversed_block_out_channels[min(i + 1 , len(__UpperCamelCase ) - 1 )] _UpperCAmelCase = i == len(__UpperCamelCase ) - 1 if up_block_type == "CrossAttnUpBlock2D": _UpperCAmelCase = FlaxCrossAttnUpBlockaD( in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , prev_output_channel=__UpperCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCAmelCase = FlaxUpBlockaD( in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , prev_output_channel=__UpperCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(__UpperCamelCase ) _UpperCAmelCase = output_channel _UpperCAmelCase = up_blocks # out _UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCAmelCase = nn.Conv( self.out_channels , 
kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=None , __UpperCamelCase : Any=None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(__UpperCamelCase , jnp.ndarray ): _UpperCAmelCase = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(__UpperCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: _UpperCAmelCase = timesteps.astype(dtype=jnp.floataa ) _UpperCAmelCase = jnp.expand_dims(__UpperCamelCase , 0 ) _UpperCAmelCase = self.time_proj(__UpperCamelCase ) _UpperCAmelCase = self.time_embedding(__UpperCamelCase ) # 2. pre-process _UpperCAmelCase = jnp.transpose(__UpperCamelCase , (0, 2, 3, 1) ) _UpperCAmelCase = self.conv_in(__UpperCamelCase ) # 3. down _UpperCAmelCase = (sample,) for down_block in self.down_blocks: if isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase ,_UpperCAmelCase = down_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train ) else: _UpperCAmelCase ,_UpperCAmelCase = down_block(__UpperCamelCase , __UpperCamelCase , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _UpperCAmelCase = () for down_block_res_sample, down_block_additional_residual in zip( __UpperCamelCase , __UpperCamelCase ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _UpperCAmelCase = new_down_block_res_samples # 4. mid _UpperCAmelCase = self.mid_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: _UpperCAmelCase = down_block_res_samples[-(self.layers_per_block + 1) :] _UpperCAmelCase = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase = up_block( __UpperCamelCase , temb=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , res_hidden_states_tuple=__UpperCamelCase , deterministic=not train , ) else: _UpperCAmelCase = up_block(__UpperCamelCase , temb=__UpperCamelCase , res_hidden_states_tuple=__UpperCamelCase , deterministic=not train ) # 6. post-process _UpperCAmelCase = self.conv_norm_out(__UpperCamelCase ) _UpperCAmelCase = nn.silu(__UpperCamelCase ) _UpperCAmelCase = self.conv_out(__UpperCamelCase ) _UpperCAmelCase = jnp.transpose(__UpperCamelCase , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=__UpperCamelCase )
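A short usage sketch for a Flax conditional UNet like the one defined above. Note that the identifiers in this record are digit-mangled ("aD" for "2D", "floataa" for "float32"), so the sketch uses the public diffusers names; treat those exact names as an assumption about the intended originals.

```python
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

# Instantiate with small shapes and initialize parameters from a PRNG key.
unet = FlaxUNet2DConditionModel(sample_size=32, in_channels=4, out_channels=4)
params = unet.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)          # (B, C, H, W)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 1280))               # 1280 = default cross_attention_dim

out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
print(out.sample.shape)  # (1, 4, 32, 32)
```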
706
"""simple docstring""" import argparse import os import re import packaging.version a : str = '''examples/''' a : List[str] = { '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''), '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } a : Tuple = { '''init''': '''src/diffusers/__init__.py''', '''setup''': '''setup.py''', } a : List[str] = '''README.md''' def _UpperCamelCase ( _A , _A , _A ) -> Dict: """simple docstring""" with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _UpperCAmelCase = f.read() _UpperCAmelCase ,_UpperCAmelCase = REPLACE_PATTERNS[pattern] _UpperCAmelCase = replace.replace("""VERSION""" , _A ) _UpperCAmelCase = re_pattern.sub(_A , _A ) with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(_A ) def _UpperCamelCase ( _A ) -> Union[str, Any]: """simple docstring""" for folder, directories, fnames in os.walk(_A ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(_A , _A ) , _A , pattern="""examples""" ) def _UpperCamelCase ( _A , _A=False ) -> int: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_A , _A , _A ) if not patch: update_version_in_examples(_A ) def _UpperCamelCase ( ) -> Any: """simple docstring""" _UpperCAmelCase = """🤗 Transformers currently provides the following architectures""" _UpperCAmelCase = """1. Want to contribute a new model?""" with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _UpperCAmelCase = f.readlines() # Find the start of the list. _UpperCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _UpperCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): _UpperCAmelCase = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , ) index += 1 with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(_A ) def _UpperCamelCase ( ) -> Optional[int]: """simple docstring""" with open(REPLACE_FILES["""init"""] , """r""" ) as f: _UpperCAmelCase = f.read() _UpperCAmelCase = REPLACE_PATTERNS["""init"""][0].search(_A ).groups()[0] return packaging.version.parse(_A ) def _UpperCamelCase ( _A=False ) -> List[Any]: """simple docstring""" _UpperCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: _UpperCAmelCase = default_version.base_version elif patch: _UpperCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: _UpperCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. 
_UpperCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" ) if len(_A ) == 0: _UpperCAmelCase = default_version print(F"""Updating version to {version}.""" ) global_version_update(_A , patch=_A ) def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" _UpperCAmelCase = get_version() _UpperCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" _UpperCAmelCase = current_version.base_version # Check with the user we got that right. _UpperCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(_A ) == 0: _UpperCAmelCase = dev_version print(F"""Updating version to {version}.""" ) global_version_update(_A ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": a : Dict = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') a : Tuple = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
19
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore A = """ Human: <<task>> Assistant: """ A = """huggingface-tools/default-prompts""" A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]: """simple docstring""" if prompt_or_repo_id is None: __UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , UpperCamelCase ) is not None: return prompt_or_repo_id __UpperCAmelCase : str = cached_file( UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(UpperCamelCase , "r" , encoding="utf-8" ) as f: return f.read()
77
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """Compute all-pairs shortest paths for a weighted graph given as a
    v x v adjacency matrix, where float("inf") marks a missing edge."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2

    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]

    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2

    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1

    # # Expected Output from the vertex, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
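A non-interactive check of `floyd_warshall` above on a 3-vertex graph, so the algorithm can be verified without typing input at stdin:

```python
# Edges: 0 -> 1 with weight 2, and 1 -> 0 with weight 1.
INF = float("inf")
g = [
    [0.0, 2.0, INF],
    [1.0, 0.0, INF],
    [INF, INF, 0.0],
]

dist, _ = floyd_warshall(g, 3)
assert dist[0][1] == 2.0  # direct edge 0 -> 1
assert dist[1][0] == 1.0  # direct edge 1 -> 0
assert dist[0][2] == INF  # vertex 2 is unreachable
```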
533
0
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # pytest exits with code 5 when no tests are collected; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
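A usage sketch for the `IGNORE_RESULT` doctest flag registered above: inside a docstring collected by pytest with this conftest active, a tagged example passes regardless of its printed output, while untagged examples are checked normally.

```python
def add(a, b):
    """
    >>> add(1, 2)  # doctest: +IGNORE_RESULT
    'this output is never compared'
    >>> add(1, 2)
    3
    """
    return a + b
```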
714
import json
import multiprocessing
import os
import re
from collections import defaultdict

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList


EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize the prompts and yield each one n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Stops generation once every sequence in the batch contains an end-of-function string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last, possibly incomplete block after the final EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions per task, gathering results across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"""check({human_eval["test"][task]["entry_point"]})"""
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"""Results: {pass_at_k}""")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
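A toy check of the stopping criterion defined above: generation stops once every decoded continuation contains one of the end-of-function strings. The tokenizer checkpoint is only an example, and downloading it assumes network access or a warm cache.

```python
import torch
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")  # any causal-LM tokenizer works

# Arguments are (start_length, eof_strings, tokenizer), as in the class above.
crit = EndOfFunctionCriteria(0, ["\nclass", "\ndef"], tok)

text = "x = 1\ndef f():\n    return 1\n"
input_ids = tok(text, return_tensors="pt").input_ids
print(crit(input_ids, None))  # True: "\ndef" occurs in the decoded text
```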
662
0
'''simple docstring''' import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class A ( unittest.TestCase ): def __init__( self , snake_case_ , snake_case_=2 , snake_case_=5_6 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=2 , snake_case_=2 , snake_case_=7 , snake_case_="gelu_new" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , snake_case_="block_sparse" , snake_case_=True , snake_case_=False , snake_case_=2 , snake_case_=3 , ) -> str: _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_attention_mask _a = use_token_type_ids _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = type_sequence_label_size _a = initializer_range _a = num_choices _a = rescale_embeddings _a = attention_type _a = use_bias _a = block_size _a = num_random_blocks def __lowerCAmelCase ( self ) -> int: _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = None if self.use_attention_mask: _a = random_attention_mask([self.batch_size, self.seq_length] ) _a = None if self.use_token_type_ids: _a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = self.prepare_config_and_inputs() _a , _a , _a , _a = config_and_inputs _a = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class A ( a_ , unittest.TestCase ): __UpperCAmelCase : Optional[int] = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) __UpperCAmelCase : List[str] = False __UpperCAmelCase : Optional[Any] = False def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer 
than other models def __lowerCAmelCase ( self ) -> Optional[Any]: super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __lowerCAmelCase ( self ) -> Any: super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __lowerCAmelCase ( self ) -> List[str]: super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __lowerCAmelCase ( self ) -> Tuple: super().test_hidden_states_output() @slow def __lowerCAmelCase ( self ) -> Any: for model_class_name in self.all_model_classes: _a = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(snake_case_ ) def __lowerCAmelCase ( self ) -> int: if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __lowerCAmelCase ( self ) -> List[str]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = self._prepare_for_class(snake_case_ , snake_case_ ) _a = model_class(snake_case_ ) @jax.jit def model_jitted(snake_case_ , snake_case_=None , **snake_case_ ): return model(input_ids=snake_case_ , attention_mask=snake_case_ , **snake_case_ ) with self.subTest("JIT Enabled" ): _a = model_jitted(**snake_case_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _a = model_jitted(**snake_case_ ).to_tuple() self.assertEqual(len(snake_case_ ) , len(snake_case_ ) ) for jitted_output, output in zip(snake_case_ , snake_case_ ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=1E-5 , snake_case_="outputs" , snake_case_=None ) -> List[str]: if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
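The JIT-equivalence test above follows a standard JAX pattern; isolated on a toy function, it looks like this:

```python
import jax
import jax.numpy as jnp

@jax.jit
def model_jitted(x):
    return jnp.tanh(x) * 2.0

x = jnp.ones((2, 3))

with_jit = model_jitted(x)          # compiled execution
with jax.disable_jit():
    without_jit = model_jitted(x)   # same function, eager execution

assert with_jit.shape == without_jit.shape
assert jnp.allclose(with_jit, without_jit)
```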
131
import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor UpperCAmelCase : Any = random.Random() def __lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any]=1.0 , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : int=None ): '''simple docstring''' if rng is None: lowerCamelCase = global_rng lowerCamelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self , A , A=7 , A=4_00 , A=20_00 , A=24 , A=24 , A=0.0 , A=1_60_00 , A=True , A=True , ) -> str: '''simple docstring''' lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = min_seq_length lowerCamelCase = max_seq_length lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCamelCase = feature_size lowerCamelCase = num_mel_bins lowerCamelCase = padding_value lowerCamelCase = sampling_rate lowerCamelCase = return_attention_mask lowerCamelCase = do_normalize def __A ( self ) -> List[str]: '''simple docstring''' return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __A ( self , A=False , A=False ) -> Tuple: '''simple docstring''' def _flatten(A ): return list(itertools.chain(*A ) ) if equal_length: lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCamelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCamelCase = [np.asarray(A ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __lowercase ( a_ , unittest.TestCase ): """simple docstring""" UpperCamelCase : str = SpeechaTextFeatureExtractor if is_speech_available() else None def __A ( self ) -> int: '''simple docstring''' lowerCamelCase = SpeechaTextFeatureExtractionTester(self ) def __A ( self , A ) -> List[Any]: '''simple docstring''' self.assertTrue(np.all(np.mean(A , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1e-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = [np.asarray(A ) for speech_input in speech_inputs] # Test feature size lowerCamelCase = feature_extractor(A , padding=A , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(A , A , 
atol=1e-3 ) ) # Test batched lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(A , A ): self.assertTrue(np.allclose(A , A , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] lowerCamelCase = np.asarray(A ) lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(A , A ): self.assertTrue(np.allclose(A , A , atol=1e-3 ) ) def __A ( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""] lowerCamelCase = [None, 16, None] for max_length, padding in zip(A , A ): lowerCamelCase = feature_extractor( A , padding=A , max_length=A , return_attention_mask=A ) lowerCamelCase = inputs.input_features lowerCamelCase = inputs.attention_mask lowerCamelCase = [np.sum(A ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def __A ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""] lowerCamelCase = [None, 16, None] for max_length, padding in zip(A , A ): lowerCamelCase = feature_extractor( A , max_length=A , padding=A , return_tensors="""np""" , return_attention_mask=A ) lowerCamelCase = inputs.input_features lowerCamelCase = inputs.attention_mask lowerCamelCase = [np.sum(A ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def __A ( self ) -> int: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = feature_extractor( A , padding="""max_length""" , max_length=4 , truncation=A , return_tensors="""np""" , return_attention_mask=A , ) lowerCamelCase = inputs.input_features lowerCamelCase = inputs.attention_mask lowerCamelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def __A ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = feature_extractor( A , 
padding="""longest""" , max_length=4 , truncation=A , return_tensors="""np""" , return_attention_mask=A , ) lowerCamelCase = inputs.input_features lowerCamelCase = inputs.attention_mask lowerCamelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCamelCase = feature_extractor( A , padding="""longest""" , max_length=16 , truncation=A , return_tensors="""np""" , return_attention_mask=A , ) lowerCamelCase = inputs.input_features lowerCamelCase = inputs.attention_mask lowerCamelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def __A ( self ) -> Optional[int]: '''simple docstring''' import torch lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = np.random.rand(1_00 , 32 ).astype(np.floataa ) lowerCamelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCamelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowerCamelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def __A ( self , A ) -> Any: '''simple docstring''' from datasets import load_dataset lowerCamelCase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech lowerCamelCase = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def __A ( self ) -> Any: '''simple docstring''' lowerCamelCase = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ] ) # fmt: on lowerCamelCase = self._load_datasamples(1 ) lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase = feature_extractor(A , return_tensors="""pt""" ).input_features self.assertEquals(input_features.shape , (1, 5_84, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , A , atol=1e-4 ) )
457
0
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __snake_case ( _SCREAMING_SNAKE_CASE): """simple docstring""" def __init__( self : Tuple , *lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int]=None , lowerCamelCase : str=None , **lowerCamelCase : List[str] ) -> List[Any]: super().__init__(*lowerCamelCase , **lowerCamelCase ) lowerCAmelCase_ : int = eval_examples lowerCAmelCase_ : int = post_process_function def __lowercase ( self : str , lowerCamelCase : Optional[Dataset] = None , lowerCamelCase : Any=None , lowerCamelCase : Optional[List[str]] = None , lowerCamelCase : str = "eval" , **lowerCamelCase : Optional[int] , ) -> Dict[str, float]: lowerCAmelCase_ : List[Any] = gen_kwargs.copy() lowerCAmelCase_ : Optional[int] = ( gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length ) lowerCAmelCase_ : Dict = ( gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams ) lowerCAmelCase_ : int = gen_kwargs lowerCAmelCase_ : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset lowerCAmelCase_ : Optional[int] = self.get_eval_dataloader(lowerCamelCase ) lowerCAmelCase_ : List[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCAmelCase_ : Dict = self.compute_metrics lowerCAmelCase_ : List[Any] = None lowerCAmelCase_ : Dict = time.time() lowerCAmelCase_ : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowerCAmelCase_ : Dict = eval_loop( lowerCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase , metric_key_prefix=lowerCamelCase , ) finally: lowerCAmelCase_ : int = compute_metrics lowerCAmelCase_ : List[str] = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( lowerCamelCase , lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCAmelCase_ : int = self.post_process_function(lowerCamelCase , lowerCamelCase , lowerCamelCase ) lowerCAmelCase_ : List[Any] = self.compute_metrics(lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'{metric_key_prefix}_' ): lowerCAmelCase_ : Any = metrics.pop(lowerCamelCase ) metrics.update(output.metrics ) else: lowerCAmelCase_ : Dict = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowerCamelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) lowerCAmelCase_ : Optional[int] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase ) return metrics def __lowercase ( self : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : str=None , lowerCamelCase : str = "test" , **lowerCamelCase : Optional[int] ) -> List[Any]: lowerCAmelCase_ : Tuple = gen_kwargs.copy() lowerCAmelCase_ : str = self.get_test_dataloader(lowerCamelCase ) # Temporarily disable metric computation, we will do it in the loop here. lowerCAmelCase_ : Any = self.compute_metrics lowerCAmelCase_ : Tuple = None lowerCAmelCase_ : Optional[Any] = time.time() lowerCAmelCase_ : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowerCAmelCase_ : str = eval_loop( lowerCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase , metric_key_prefix=lowerCamelCase , ) finally: lowerCAmelCase_ : Dict = compute_metrics lowerCAmelCase_ : Dict = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( lowerCamelCase , lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCAmelCase_ : str = self.post_process_function(lowerCamelCase , lowerCamelCase , lowerCamelCase , """predict""" ) lowerCAmelCase_ : Optional[Any] = self.compute_metrics(lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'{metric_key_prefix}_' ): lowerCAmelCase_ : List[str] = metrics.pop(lowerCamelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase )
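For reference, a standalone look at the `speed_metrics` helper that both methods above fold into their metric dicts: given a key prefix, a wall-clock start time, and counts, it returns runtime and throughput entries.

```python
import time
from transformers.trainer_utils import speed_metrics

start = time.time()
time.sleep(0.1)  # stand-in for an evaluation or prediction loop
print(speed_metrics("eval", start, num_samples=64, num_steps=8))
# e.g. {'eval_runtime': 0.1003, 'eval_samples_per_second': 638.2, 'eval_steps_per_second': 79.8}
```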
707
def count_set_bits(number: int) -> int:
    """Count the set bits in a non-negative integer using Brian Kernighan's algorithm.

    >>> count_set_bits(25)
    3
    >>> count_set_bits(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32
        # times, it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
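A quick cross-check of the Kernighan loop above (using the `count_set_bits` name from the cleaned-up version) against Python's built-in binary formatting:

```python
for n in (0, 1, 25, 37, 2**31 - 1):
    assert count_set_bits(n) == bin(n).count("1")
print("all checks passed")
```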
398
0
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase : def __init__( self : Dict , __snake_case : str , __snake_case : str=13 , __snake_case : Tuple=32 , __snake_case : int=2 , __snake_case : Any=3 , __snake_case : Any=16 , __snake_case : Optional[Any]=[1, 2, 1] , __snake_case : Union[str, Any]=[2, 2, 4] , __snake_case : Union[str, Any]=2 , __snake_case : Any=2.0 , __snake_case : int=True , __snake_case : int=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Dict=0.1 , __snake_case : Union[str, Any]="gelu" , __snake_case : List[str]=False , __snake_case : int=True , __snake_case : str=0.02 , __snake_case : Dict=1E-5 , __snake_case : List[str]=True , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : int=10 , __snake_case : Any=8 , __snake_case : List[str]=["stage1", "stage2", "stage3"] , __snake_case : Tuple=[1, 2, 3] , ) -> Optional[int]: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = embed_dim _lowerCAmelCase = depths _lowerCAmelCase = num_heads _lowerCAmelCase = window_size _lowerCAmelCase = mlp_ratio _lowerCAmelCase = qkv_bias _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = drop_path_rate _lowerCAmelCase = hidden_act _lowerCAmelCase = use_absolute_embeddings _lowerCAmelCase = patch_norm _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = is_training _lowerCAmelCase = scope _lowerCAmelCase = use_labels _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = encoder_stride _lowerCAmelCase = out_features _lowerCAmelCase = out_indices def lowercase__ ( self : List[str] ) -> Any: _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Union[str, Any] ) -> Any: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowercase__ ( self : int , __snake_case : Tuple , 
__snake_case : Union[str, Any] , __snake_case : List[str] ) -> Any: _lowerCAmelCase = MaskFormerSwinModel(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case ) _lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowercase__ ( self : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] ) -> Tuple: _lowerCAmelCase = MaskFormerSwinBackbone(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(__snake_case ): _lowerCAmelCase = ["""stem"""] _lowerCAmelCase = MaskFormerSwinBackbone(config=__snake_case ) def lowercase__ ( self : Union[str, Any] ) -> Tuple: _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: Any = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _lowercase: List[str] = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {} _lowercase: Optional[Any] = False _lowercase: Optional[int] = False _lowercase: List[Any] = False _lowercase: Union[str, Any] = False _lowercase: int = False def lowercase__ ( self : Dict ) -> Optional[int]: _lowerCAmelCase = MaskFormerSwinModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=__snake_case , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def lowercase__ ( self : Union[str, Any] ) -> Any: pass def lowercase__ ( self : int ) -> Any: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase__ ( self : Union[str, Any] ) -> Tuple: return def lowercase__ ( self : Dict ) -> str: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def lowercase__ ( self : Dict ) -> List[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__snake_case ) @unittest.skip("""Swin does not use inputs_embeds""" ) def lowercase__ ( self : List[Any] ) -> Union[str, Any]: pass @unittest.skip("""Swin does not support feedforward chunking""" ) def lowercase__ ( self : Any ) -> Dict: pass def lowercase__ ( self : Any ) -> int: _lowerCAmelCase , _lowerCAmelCase = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) ) def lowercase__ ( self : Union[str, Any] ) -> str: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __snake_case ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def lowercase__ ( self : str ) -> Optional[Any]: pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def lowercase__ ( self : Tuple ) -> List[str]: pass def lowercase__ ( self : Any , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : str ) -> Tuple: _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) ) _lowerCAmelCase = outputs.hidden_states _lowerCAmelCase = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__snake_case ) , __snake_case ) # Swin has a different seq_length _lowerCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowercase__ ( self : int ) -> Dict: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _lowerCAmelCase = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case ) def lowercase__ ( self : str ) -> Dict: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = 3 _lowerCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _lowerCAmelCase = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, 
padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def lowercase__ ( self : str ) -> Union[str, Any]: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowercase__ ( self : str ) -> Union[str, Any]: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowercase__ ( self : Any ) -> Union[str, Any]: pass def lowercase__ ( self : Optional[Any] ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__snake_case : Tuple ): _lowerCAmelCase = 0 return t def check_equivalence(__snake_case : str , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any]={} ): with torch.no_grad(): _lowerCAmelCase = model(**__snake_case , return_dict=__snake_case , **__snake_case ) _lowerCAmelCase = model(**__snake_case , return_dict=__snake_case , **__snake_case ).to_tuple() def recursive_check(__snake_case : Optional[Any] , __snake_case : Tuple ): if isinstance(__snake_case , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__snake_case , __snake_case ): recursive_check(__snake_case , __snake_case ) elif isinstance(__snake_case , __snake_case ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(__snake_case , __snake_case ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__snake_case ) , set_nan_tensor_to_zero(__snake_case ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:" f" {torch.isnan(__snake_case ).any()} and `inf`: {torch.isinf(__snake_case )}. Dict has" f" `nan`: {torch.isnan(__snake_case ).any()} and `inf`: {torch.isinf(__snake_case )}." 
) , ) recursive_check(__snake_case , __snake_case ) for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case ) _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case ) _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case ) _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case ) _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case , {"""output_hidden_states""": True} ) _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) check_equivalence(__snake_case , __snake_case , __snake_case , {"""output_hidden_states""": True} ) @require_torch class UpperCAmelCase ( unittest.TestCase , snake_case_ ): _lowercase: str = (MaskFormerSwinBackbone,) if is_torch_available() else () _lowercase: Optional[Any] = MaskFormerSwinConfig def lowercase__ ( self : List[Any] ) -> int: _lowerCAmelCase = MaskFormerSwinModelTester(self ) def lowercase__ ( self : Tuple ) -> Union[str, Any]: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: _lowerCAmelCase = backbone_class(__snake_case ) backbone.to(__snake_case ) backbone.eval() _lowerCAmelCase = backbone(**__snake_case ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __snake_case ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _lowerCAmelCase = backbone(**__snake_case , output_hidden_states=__snake_case ) self.assertIsNotNone(outputs.hidden_states ) self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = hidden_state.shape self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _lowerCAmelCase = backbone(**__snake_case , output_attentions=__snake_case ) self.assertIsNotNone(outputs.attentions )
207
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType A__ : Tuple =logging.get_logger(__name__) A__ : Any ={ '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class UpperCAmelCase ( snake_case_ ): _lowercase: Tuple = '''imagegpt''' _lowercase: Optional[int] = ['''past_key_values'''] _lowercase: str = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Optional[int] , __snake_case : Optional[Any]=5_12 + 1 , __snake_case : Union[str, Any]=32 * 32 , __snake_case : List[Any]=5_12 , __snake_case : Any=24 , __snake_case : Optional[Any]=8 , __snake_case : List[Any]=None , __snake_case : str="quick_gelu" , __snake_case : List[Any]=0.1 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=1E-5 , __snake_case : Optional[int]=0.02 , __snake_case : str=True , __snake_case : Dict=True , __snake_case : Union[str, Any]=False , __snake_case : Dict=False , __snake_case : Union[str, Any]=False , **__snake_case : Union[str, Any] , ) -> List[Any]: _lowerCAmelCase = vocab_size _lowerCAmelCase = n_positions _lowerCAmelCase = n_embd _lowerCAmelCase = n_layer _lowerCAmelCase = n_head _lowerCAmelCase = n_inner _lowerCAmelCase = activation_function _lowerCAmelCase = resid_pdrop _lowerCAmelCase = embd_pdrop _lowerCAmelCase = attn_pdrop _lowerCAmelCase = layer_norm_epsilon _lowerCAmelCase = initializer_range _lowerCAmelCase = scale_attn_weights _lowerCAmelCase = use_cache _lowerCAmelCase = scale_attn_by_inverse_layer_idx _lowerCAmelCase = reorder_and_upcast_attn _lowerCAmelCase = tie_word_embeddings super().__init__(tie_word_embeddings=__snake_case , **__snake_case ) class UpperCAmelCase ( snake_case_ ): @property def lowercase__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ] ) def lowercase__ ( self : Union[str, Any] , __snake_case : "FeatureExtractionMixin" , __snake_case : int = 1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional["TensorType"] = None , __snake_case : int = 3 , __snake_case : int = 32 , __snake_case : int = 32 , ) -> Mapping[str, Any]: _lowerCAmelCase = self._generate_dummy_images(__snake_case , __snake_case , __snake_case , __snake_case ) _lowerCAmelCase = dict(preprocessor(images=__snake_case , return_tensors=__snake_case ) ) return inputs
207
1
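A minimal sketch of the patch-count arithmetic the hidden-state shape checks above rely on; the helper name and the concrete sizes are illustrative, not taken from the test configuration.

import collections.abc


def num_patches(image_size, patch_size):
    # Normalise bare ints to (height, width) pairs, mirroring the tests.
    if not isinstance(image_size, collections.abc.Iterable):
        image_size = (image_size, image_size)
    if not isinstance(patch_size, collections.abc.Iterable):
        patch_size = (patch_size, patch_size)
    # Same formula as the tests: patches per row times patches per column.
    return (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])


assert num_patches(32, 4) == 64  # a 32x32 image with 4x4 patches -> 8 * 8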
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class _UpperCAmelCase( nn.Module ): lowercase__ = 42 lowercase__ = 42 lowercase__ = 0.0 lowercase__ = 1 lowercase__ = 1 lowercase__ = True lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = jnp.floataa def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = [] for i in range(self.num_layers): _UpperCamelCase = self.in_channels if i == 0 else self.out_channels _UpperCamelCase = FlaxResnetBlockaD( in_channels=__a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__a) _UpperCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__a) _UpperCamelCase = resnets _UpperCamelCase = attentions if self.add_downsample: _UpperCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype) def __call__( self , __a , __a , __a , __a=True) -> Optional[int]: '''simple docstring''' _UpperCamelCase = () for resnet, attn in zip(self.resnets , self.attentions): _UpperCamelCase = resnet(__a , __a , deterministic=__a) _UpperCamelCase = attn(__a , __a , deterministic=__a) output_states += (hidden_states,) if self.add_downsample: _UpperCamelCase = self.downsamplers_a(__a) output_states += (hidden_states,) return hidden_states, output_states class _UpperCAmelCase( nn.Module ): lowercase__ = 42 lowercase__ = 42 lowercase__ = 0.0 lowercase__ = 1 lowercase__ = True lowercase__ = jnp.floataa def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = [] for i in range(self.num_layers): _UpperCamelCase = self.in_channels if i == 0 else self.out_channels _UpperCamelCase = FlaxResnetBlockaD( in_channels=__a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__a) _UpperCamelCase = resnets if self.add_downsample: _UpperCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype) def __call__( self , __a , __a , __a=True) -> str: '''simple docstring''' _UpperCamelCase = () for resnet in self.resnets: _UpperCamelCase = resnet(__a , __a , deterministic=__a) output_states += (hidden_states,) if self.add_downsample: _UpperCamelCase = self.downsamplers_a(__a) output_states += (hidden_states,) return hidden_states, output_states class _UpperCAmelCase( nn.Module ): lowercase__ = 42 lowercase__ = 42 lowercase__ = 42 lowercase__ = 0.0 lowercase__ = 1 lowercase__ = 1 lowercase__ = True lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = jnp.floataa def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = [] for i in range(self.num_layers): _UpperCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels _UpperCamelCase = self.prev_output_channel if i == 0 else self.out_channels _UpperCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__a) _UpperCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , 
n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__a) _UpperCamelCase = resnets _UpperCamelCase = attentions if self.add_upsample: _UpperCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype) def __call__( self , __a , __a , __a , __a , __a=True) -> Dict: '''simple docstring''' for resnet, attn in zip(self.resnets , self.attentions): # pop res hidden states _UpperCamelCase = res_hidden_states_tuple[-1] _UpperCamelCase = res_hidden_states_tuple[:-1] _UpperCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1) _UpperCamelCase = resnet(__a , __a , deterministic=__a) _UpperCamelCase = attn(__a , __a , deterministic=__a) if self.add_upsample: _UpperCamelCase = self.upsamplers_a(__a) return hidden_states class _UpperCAmelCase( nn.Module ): lowercase__ = 42 lowercase__ = 42 lowercase__ = 42 lowercase__ = 0.0 lowercase__ = 1 lowercase__ = True lowercase__ = jnp.floataa def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = [] for i in range(self.num_layers): _UpperCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels _UpperCamelCase = self.prev_output_channel if i == 0 else self.out_channels _UpperCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__a) _UpperCamelCase = resnets if self.add_upsample: _UpperCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype) def __call__( self , __a , __a , __a , __a=True) -> Any: '''simple docstring''' for resnet in self.resnets: # pop res hidden states _UpperCamelCase = res_hidden_states_tuple[-1] _UpperCamelCase = res_hidden_states_tuple[:-1] _UpperCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1) _UpperCamelCase = resnet(__a , __a , deterministic=__a) if self.add_upsample: _UpperCamelCase = self.upsamplers_a(__a) return hidden_states class _UpperCAmelCase( nn.Module ): lowercase__ = 42 lowercase__ = 0.0 lowercase__ = 1 lowercase__ = 1 lowercase__ = False lowercase__ = False lowercase__ = jnp.floataa def UpperCAmelCase ( self) -> Dict: '''simple docstring''' # there is always at least one resnet _UpperCamelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _UpperCamelCase = [] for _ in range(self.num_layers): _UpperCamelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__a) _UpperCamelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__a) _UpperCamelCase = resnets _UpperCamelCase = attentions def __call__( self , __a , __a , __a , __a=True) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.resnets[0](__a , __a) for attn, resnet in zip(self.attentions , self.resnets[1:]): _UpperCamelCase = attn(__a , __a , deterministic=__a) _UpperCamelCase = resnet(__a , __a , deterministic=__a) return 
hidden_states
78
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _UpperCAmelCase: lowercase__ = MBartConfig lowercase__ = {} lowercase__ = 'gelu' def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> Any: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = eos_token_id _UpperCamelCase = pad_token_id _UpperCamelCase = bos_token_id def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) _UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) _UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCamelCase = prepare_mbart_inputs_dict(__a , __a , __a) return config, inputs_dict def UpperCAmelCase ( self , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = TFMBartModel(config=__a).get_decoder() _UpperCamelCase = inputs_dict['''input_ids'''] _UpperCamelCase = input_ids[:1, :] _UpperCamelCase = inputs_dict['''attention_mask'''][:1, :] _UpperCamelCase = inputs_dict['''head_mask'''] _UpperCamelCase = 1 # first forward pass _UpperCamelCase = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a) _UpperCamelCase , _UpperCamelCase = outputs.to_tuple() _UpperCamelCase = past_key_values[1] def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, __snake_case=None, __snake_case=None, __snake_case=None, ) -> Optional[int]: """simple docstring""" if attention_mask is None: _UpperCamelCase = tf.cast(tf.math.not_equal(__snake_case, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: _UpperCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), 
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: _UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowercase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowercase__ = ( { 'conversational': TFMBartForConditionalGeneration, 'feature-extraction': TFMBartModel, 'summarization': TFMBartForConditionalGeneration, 'text2text-generation': TFMBartForConditionalGeneration, 'translation': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowercase__ = True lowercase__ = False lowercase__ = False def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Dict: '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = TFMBartModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a) def UpperCAmelCase ( self) -> str: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase( unittest.TestCase ): lowercase__ = [ ' UN Chief Says There Is No Military Solution in Syria', ] lowercase__ = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', ] lowercase__ = 'facebook/mbart-large-en-ro' @cached_property def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name) @cached_property def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def UpperCAmelCase ( self , **__a) -> List[str]: '''simple docstring''' _UpperCamelCase = self.translate_src_text(**__a) self.assertListEqual(self.expected_text , __a) def UpperCAmelCase ( self , **__a) -> Dict: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , **__a , return_tensors='''tf''') _UpperCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2) _UpperCamelCase = self.tokenizer.batch_decode(__a , skip_special_tokens=__a) return generated_words @slow def UpperCAmelCase ( self) -> Any: '''simple docstring''' self._assert_generated_batch_equal_expected()
78
1
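A small sketch of the skip-connection pattern the Flax up blocks above implement: the residual tensor popped off res_hidden_states_tuple is concatenated with the running activations along the channel (last) axis before the resnet is applied. The shapes below are illustrative.

import jax.numpy as jnp

hidden_states = jnp.zeros((1, 8, 8, 320))      # NHWC activations
res_hidden_states = jnp.zeros((1, 8, 8, 640))  # skip tensor saved on the down pass
merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
assert merged.shape == (1, 8, 8, 960)          # channel counts add: 320 + 640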
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : str ) -> list[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =[0 for i in range(len(lowerCAmelCase_ ) )] # initialize interval's left pointer and right pointer SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple =0, 0 for i in range(1 ,len(lowerCAmelCase_ ) ): # case when current index is inside the interval if i <= right_pointer: SCREAMING_SNAKE_CASE_ : Tuple =min(right_pointer - i + 1 ,z_result[i - left_pointer] ) SCREAMING_SNAKE_CASE_ : Tuple =min_edge while go_next(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str =i, i + z_result[i] - 1 return z_result def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : int ,lowerCAmelCase_ : list[int] ,lowerCAmelCase_ : str ) -> bool: """simple docstring""" return i + z_result[i] < len(lowerCAmelCase_ ) and s[z_result[i]] == s[i + z_result[i]] def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : str ,lowerCAmelCase_ : str ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : str =0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string SCREAMING_SNAKE_CASE_ : Union[str, Any] =z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(lowerCAmelCase_ ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
220
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowerCAmelCase_ ( __A ): '''simple docstring''' _lowercase = 'Speech2TextFeatureExtractor' _lowercase = 'Speech2TextTokenizer' def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): super().__init__(__UpperCAmelCase , __UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[Any] =self.feature_extractor SCREAMING_SNAKE_CASE_ : Optional[int] =False def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase ) if "raw_speech" in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' ) SCREAMING_SNAKE_CASE_ : Any =kwargs.pop('raw_speech' ) else: SCREAMING_SNAKE_CASE_ : str =kwargs.pop('audio' , __UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : int =kwargs.pop('sampling_rate' , __UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Tuple =kwargs.pop('text' , __UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: SCREAMING_SNAKE_CASE_ : Dict =args[0] SCREAMING_SNAKE_CASE_ : Optional[Any] =args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None: SCREAMING_SNAKE_CASE_ : str =self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: SCREAMING_SNAKE_CASE_ : str =encodings['input_ids'] return inputs def __lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def __lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @contextmanager def __lowerCamelCase ( self ): warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your audio inputs, or in a separate call.' ) SCREAMING_SNAKE_CASE_ : Optional[Any] =True SCREAMING_SNAKE_CASE_ : int =self.tokenizer yield SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.feature_extractor SCREAMING_SNAKE_CASE_ : Tuple =False
220
1
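Because the obfuscation in the file above reuses one name (SCREAMING_SNAKE_CASE__) for all three functions, here is the same Z-function matcher as a readable, self-contained sketch; the names are mine, not the dataset's.

def z_function(s):
    # z[i] = length of the longest common prefix of s and s[i:].
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:  # reuse the previously computed interval
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:  # this match extends the interval
            left, right = i, i + z[i] - 1
    return z


def count_occurrences(pattern, text):
    # As in the file above: any z-value >= len(pattern) in the
    # concatenation pattern + text marks a full match.
    return sum(1 for v in z_function(pattern + text) if v >= len(pattern))


assert count_occurrences("aba", "ababa") == 2  # matches at offsets 0 and 2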
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = { 'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json', } class _lowerCamelCase (__lowerCamelCase ): _snake_case = "efficientnet" def __init__( self : str , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_0_0 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , lowerCamelCase_ : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 2_5_6_0 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : Dict , ): """simple docstring""" super().__init__(**lowerCamelCase_ ) _lowercase : Union[str, Any] = num_channels _lowercase : List[Any] = image_size _lowercase : str = width_coefficient _lowercase : Optional[Any] = depth_coefficient _lowercase : Union[str, Any] = depth_divisor _lowercase : Optional[int] = kernel_sizes _lowercase : Union[str, Any] = in_channels _lowercase : int = out_channels _lowercase : Optional[Any] = depthwise_padding _lowercase : Union[str, Any] = strides _lowercase : str = num_block_repeats _lowercase : List[Any] = expand_ratios _lowercase : str = squeeze_expansion_ratio _lowercase : int = hidden_act _lowercase : List[str] = hidden_dim _lowercase : int = pooling_type _lowercase : Optional[Any] = initializer_range _lowercase : Optional[int] = batch_norm_eps _lowercase : Tuple = batch_norm_momentum _lowercase : Dict = dropout_rate _lowercase : int = drop_connect_rate _lowercase : Dict = sum(lowerCamelCase_ ) * 4 class _lowerCamelCase (__lowerCamelCase ): _snake_case = version.parse("1.11" ) @property def __UpperCAmelCase ( self : List[str] ): """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __UpperCAmelCase ( self : Dict ): """simple docstring""" return 1E-5
283
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration SCREAMING_SNAKE_CASE = pytest.mark.integration SCREAMING_SNAKE_CASE = {'comet'} SCREAMING_SNAKE_CASE = importlib.util.find_spec('fairseq') is not None SCREAMING_SNAKE_CASE = {'code_eval'} SCREAMING_SNAKE_CASE = os.name == 'nt' SCREAMING_SNAKE_CASE = {'bertscore', 'frugalscore', 'perplexity'} SCREAMING_SNAKE_CASE = importlib.util.find_spec('transformers') is not None def __lowerCAmelCase( __UpperCAmelCase ): """simple docstring""" @wraps(__UpperCAmelCase ) def wrapper(self ,__UpperCAmelCase ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self ,__UpperCAmelCase ) return wrapper def __lowerCAmelCase( __UpperCAmelCase ): """simple docstring""" @wraps(__UpperCAmelCase ) def wrapper(self ,__UpperCAmelCase ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self ,__UpperCAmelCase ) return wrapper def __lowerCAmelCase( __UpperCAmelCase ): """simple docstring""" @wraps(__UpperCAmelCase ) def wrapper(self ,__UpperCAmelCase ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self ,__UpperCAmelCase ) return wrapper def __lowerCAmelCase( ): """simple docstring""" _lowercase : int = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @local class _lowerCamelCase (parameterized.TestCase ): _snake_case = {} _snake_case = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def __UpperCAmelCase ( self : str , lowerCamelCase_ : List[str] ): """simple docstring""" _lowercase : Optional[Any] = '[...]' _lowercase : str = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowerCamelCase_ ) ).module_path ) _lowercase : Dict = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCamelCase_ ) # check parameters _lowercase : Optional[int] = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(lowerCamelCase_ , metric_module.__name__ ): with self.use_local_metrics(): try: _lowercase : Optional[Any] = doctest.testmod(lowerCamelCase_ , verbose=lowerCamelCase_ , raise_on_error=lowerCamelCase_ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Dict ): """simple docstring""" _lowercase : Optional[Any] = '[...]' _lowercase : Dict = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowerCamelCase_ ) ).module_path ) # run 
doctest with self.use_local_metrics(): _lowercase : str = doctest.testmod(lowerCamelCase_ , verbose=lowerCamelCase_ , raise_on_error=lowerCamelCase_ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str ): """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCamelCase_ ): yield else: yield @contextmanager def __UpperCAmelCase ( self : Dict ): """simple docstring""" def load_local_metric(lowerCamelCase_ : Optional[Any] , *lowerCamelCase_ : Any , **lowerCamelCase_ : Optional[int] ): return load_metric(os.path.join('metrics' , lowerCamelCase_ ) , *lowerCamelCase_ , **lowerCamelCase_ ) with patch('datasets.load_metric' ) as mock_load_metric: _lowercase : str = load_local_metric yield @classmethod def __UpperCAmelCase ( cls : Tuple , lowerCamelCase_ : Tuple ): """simple docstring""" def wrapper(lowerCamelCase_ : int ): _lowercase : Any = contextmanager(lowerCamelCase_ ) _lowercase : Any = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def __lowerCAmelCase( __UpperCAmelCase ): """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' ,'' ,'' ) # handle pytest cli flags class _lowerCamelCase (__lowerCamelCase ): def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ): """simple docstring""" assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: _lowercase : Dict = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def __lowerCAmelCase( __UpperCAmelCase ): """simple docstring""" import torch def bert_cos_score_idf(__UpperCAmelCase ,__UpperCAmelCase ,*__UpperCAmelCase ,**__UpperCAmelCase ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__UpperCAmelCase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: _lowercase : Tuple = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def __lowerCAmelCase( __UpperCAmelCase ): """simple docstring""" def load_from_checkpoint(__UpperCAmelCase ): class _lowerCamelCase : def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : str , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : List[str] ): """simple docstring""" assert len(lowerCamelCase_ ) == 2 _lowercase : Union[str, Any] = [0.19, 0.92] return scores, sum(lowerCamelCase_ ) / len(lowerCamelCase_ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: _lowercase : Dict = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: _lowercase : str = load_from_checkpoint yield def __lowerCAmelCase( ): """simple docstring""" _lowercase : Tuple = load_metric(os.path.join('metrics' ,'seqeval' ) ) _lowercase : int = 'ERROR' _lowercase : Union[str, Any] = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with 
pytest.raises(__UpperCAmelCase ,match=re.escape(__UpperCAmelCase ) ): metric.compute(predictions=[] ,references=[] ,scheme=__UpperCAmelCase )
283
1
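The metric tests above all funnel through doctest.testmod and assert zero failures. A self-contained version of that pattern; the function is a made-up example, not part of the library.

import doctest


def scale_width(width, coefficient):
    """Scale a channel count by a width coefficient.

    >>> scale_width(32, 2.0)
    64
    """
    return int(width * coefficient)


results = doctest.testmod(verbose=False)
assert results.failed == 0 and results.attempted >= 1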
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : str = OrderedDict( [ # Base model mapping ('''albert''', '''FlaxAlbertModel'''), ('''bart''', '''FlaxBartModel'''), ('''beit''', '''FlaxBeitModel'''), ('''bert''', '''FlaxBertModel'''), ('''big_bird''', '''FlaxBigBirdModel'''), ('''blenderbot''', '''FlaxBlenderbotModel'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''), ('''clip''', '''FlaxCLIPModel'''), ('''distilbert''', '''FlaxDistilBertModel'''), ('''electra''', '''FlaxElectraModel'''), ('''gpt-sw3''', '''FlaxGPT2Model'''), ('''gpt2''', '''FlaxGPT2Model'''), ('''gpt_neo''', '''FlaxGPTNeoModel'''), ('''gptj''', '''FlaxGPTJModel'''), ('''longt5''', '''FlaxLongT5Model'''), ('''marian''', '''FlaxMarianModel'''), ('''mbart''', '''FlaxMBartModel'''), ('''mt5''', '''FlaxMT5Model'''), ('''opt''', '''FlaxOPTModel'''), ('''pegasus''', '''FlaxPegasusModel'''), ('''regnet''', '''FlaxRegNetModel'''), ('''resnet''', '''FlaxResNetModel'''), ('''roberta''', '''FlaxRobertaModel'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''), ('''roformer''', '''FlaxRoFormerModel'''), ('''t5''', '''FlaxT5Model'''), ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''), ('''vit''', '''FlaxViTModel'''), ('''wav2vec2''', '''FlaxWav2Vec2Model'''), ('''whisper''', '''FlaxWhisperModel'''), ('''xglm''', '''FlaxXGLMModel'''), ('''xlm-roberta''', '''FlaxXLMRobertaModel'''), ] ) lowercase__ : Tuple = OrderedDict( [ # Model for pre-training mapping ('''albert''', '''FlaxAlbertForPreTraining'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForPreTraining'''), ('''big_bird''', '''FlaxBigBirdForPreTraining'''), ('''electra''', '''FlaxElectraForPreTraining'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) lowercase__ : Any = OrderedDict( [ # Model for Masked LM mapping ('''albert''', '''FlaxAlbertForMaskedLM'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForMaskedLM'''), ('''big_bird''', '''FlaxBigBirdForMaskedLM'''), ('''distilbert''', '''FlaxDistilBertForMaskedLM'''), ('''electra''', '''FlaxElectraForMaskedLM'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) lowercase__ : Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''), ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), 
('''marian''', '''FlaxMarianMTModel'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ] ) lowercase__ : Optional[int] = OrderedDict( [ # Model for Image-classsification ('''beit''', '''FlaxBeitForImageClassification'''), ('''regnet''', '''FlaxRegNetForImageClassification'''), ('''resnet''', '''FlaxResNetForImageClassification'''), ('''vit''', '''FlaxViTForImageClassification'''), ] ) lowercase__ : Dict = OrderedDict( [ ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''), ] ) lowercase__ : List[Any] = OrderedDict( [ # Model for Causal LM mapping ('''bart''', '''FlaxBartForCausalLM'''), ('''bert''', '''FlaxBertForCausalLM'''), ('''big_bird''', '''FlaxBigBirdForCausalLM'''), ('''electra''', '''FlaxElectraForCausalLM'''), ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''), ('''gpt2''', '''FlaxGPT2LMHeadModel'''), ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''), ('''gptj''', '''FlaxGPTJForCausalLM'''), ('''opt''', '''FlaxOPTForCausalLM'''), ('''roberta''', '''FlaxRobertaForCausalLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''), ('''xglm''', '''FlaxXGLMForCausalLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''), ] ) lowercase__ : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ('''albert''', '''FlaxAlbertForSequenceClassification'''), ('''bart''', '''FlaxBartForSequenceClassification'''), ('''bert''', '''FlaxBertForSequenceClassification'''), ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''), ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''), ('''electra''', '''FlaxElectraForSequenceClassification'''), ('''mbart''', '''FlaxMBartForSequenceClassification'''), ('''roberta''', '''FlaxRobertaForSequenceClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''), ('''roformer''', '''FlaxRoFormerForSequenceClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''), ] ) lowercase__ : List[Any] = OrderedDict( [ # Model for Question Answering mapping ('''albert''', '''FlaxAlbertForQuestionAnswering'''), ('''bart''', '''FlaxBartForQuestionAnswering'''), ('''bert''', '''FlaxBertForQuestionAnswering'''), ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''), ('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''), ('''electra''', '''FlaxElectraForQuestionAnswering'''), ('''mbart''', '''FlaxMBartForQuestionAnswering'''), ('''roberta''', '''FlaxRobertaForQuestionAnswering'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''), ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''), ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''), ] ) lowercase__ : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ('''albert''', '''FlaxAlbertForTokenClassification'''), ('''bert''', '''FlaxBertForTokenClassification'''), ('''big_bird''', '''FlaxBigBirdForTokenClassification'''), ('''distilbert''', '''FlaxDistilBertForTokenClassification'''), ('''electra''', '''FlaxElectraForTokenClassification'''), ('''roberta''', '''FlaxRobertaForTokenClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''), ('''roformer''', '''FlaxRoFormerForTokenClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''), ] ) lowercase__ : int = OrderedDict( [ # Model for Multiple 
Choice mapping ('''albert''', '''FlaxAlbertForMultipleChoice'''), ('''bert''', '''FlaxBertForMultipleChoice'''), ('''big_bird''', '''FlaxBigBirdForMultipleChoice'''), ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''), ('''electra''', '''FlaxElectraForMultipleChoice'''), ('''roberta''', '''FlaxRobertaForMultipleChoice'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''), ('''roformer''', '''FlaxRoFormerForMultipleChoice'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''), ] ) lowercase__ : str = OrderedDict( [ ('''bert''', '''FlaxBertForNextSentencePrediction'''), ] ) lowercase__ : Union[str, Any] = OrderedDict( [ ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ] ) lowercase__ : Optional[int] = OrderedDict( [ ('''whisper''', '''FlaxWhisperForAudioClassification'''), ] ) lowercase__ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowercase__ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowercase__ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowercase__ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowercase__ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowercase__ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowercase__ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowercase__ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowercase__ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowercase__ : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowercase__ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowercase__ : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowercase__ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowercase__ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : Any = FLAX_MODEL_MAPPING lowercase__ : Optional[Any] = auto_class_update(FlaxAutoModel) class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : List[str] = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowercase__ : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''') class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : int = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowercase__ : Optional[int] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''') class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : int = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowercase__ : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''') class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : Optional[int] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowercase__ : Tuple = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base''' ) class 
_UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : Optional[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase__ : List[Any] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='''sequence classification''' ) class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowercase__ : str = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''') class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowercase__ : Dict = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='''token classification''' ) class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : Optional[int] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowercase__ : Dict = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''') class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : Union[str, Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowercase__ : Dict = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction''' ) class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : Tuple = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowercase__ : Optional[int] = auto_class_update( FlaxAutoModelForImageClassification, head_doc='''image classification''' ) class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : int = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowercase__ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''') class _UpperCAmelCase ( _BaseAutoModelClass): _lowerCAmelCase : Tuple = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowercase__ : Union[str, Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling''' )
123
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class _UpperCAmelCase ( lowerCAmelCase__): def _snake_case ( self : int , lowercase_ : Optional[Any]=None , lowercase_ : List[str]=None , lowercase_ : Optional[Any]=None , **lowercase_ : Any ): if tokenize_kwargs is None: snake_case_ : str = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) snake_case_ : int = truncation snake_case_ : Union[str, Any] = tokenize_kwargs snake_case_ : int = {} if return_tensors is not None: snake_case_ : str = return_tensors return preprocess_params, {}, postprocess_params def _snake_case ( self : List[Any] , lowercase_ : Optional[int] , **lowercase_ : int ): snake_case_ : Union[str, Any] = self.framework snake_case_ : List[Any] = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ ) return model_inputs def _snake_case ( self : Union[str, Any] , lowercase_ : Tuple ): snake_case_ : Union[str, Any] = self.model(**lowercase_ ) return model_outputs def _snake_case ( self : str , lowercase_ : str , lowercase_ : List[str]=False ): # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *lowercase_ : int , **lowercase_ : Dict ): return super().__call__(*lowercase_ , **lowercase_ )
123
1
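The auto-model file above only stores class *names*; _LazyAutoMapping defers the import until first access. A rough sketch of that lookup contract, with a hypothetical lookup helper and a trimmed mapping.

from collections import OrderedDict

MAPPING_NAMES = OrderedDict([("bert", "FlaxBertModel"), ("vit", "FlaxViTModel")])


def lookup(model_type):
    # Resolve a model type to a class name; the real mapping would
    # then import the class lazily from transformers.
    if model_type not in MAPPING_NAMES:
        raise KeyError(f"Unrecognized model type: {model_type}")
    return MAPPING_NAMES[model_type]


assert lookup("vit") == "FlaxViTModel"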
"""simple docstring""" # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, 
is_torch_bfaa_cpu_available, is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) A__ : Tuple = 'pytorch_model.bin' A__ : str = 'pytorch_model.bin.index.json' A__ : Optional[int] = 'adapter_config.json' A__ : Any = 'adapter_model.bin' A__ : List[str] = 'adapter_model.safetensors' A__ : str = 'tf_model.h5' A__ : Dict = 'tf_model.h5.index.json' A__ : List[Any] = 'model.ckpt' A__ : str = 'flax_model.msgpack' A__ : Any = 'flax_model.msgpack.index.json' A__ : Tuple = 'model.safetensors' A__ : List[Any] = 'model.safetensors.index.json' A__ : List[Any] = 'config.json' A__ : List[Any] = 'preprocessor_config.json' A__ : Optional[int] = FEATURE_EXTRACTOR_NAME A__ : Tuple = 'generation_config.json' A__ : List[str] = 'modelcard.json' A__ : str = '▁' A__ : Optional[int] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility A__ : int = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. A__ : Tuple = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] A__ : Tuple = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def _snake_case ( lowerCamelCase__ : int ) -> Any: if version.parse(snake_case__ ) < version.parse(snake_case__ ): if "dev" in min_version: lowerCamelCase_ : List[Any] =( "This example requires a source install from HuggingFace Transformers (see " "`https://huggingface.co/docs/transformers/installation#install-from-source`)," ) else: lowerCamelCase_ : Optional[int] =F"""This example requires a minimum version of {min_version},""" error_message += F""" but the version found is {__version__}.\n""" raise ImportError( error_message + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " "versions of HuggingFace Transformers." )
717
"""simple docstring""" from __future__ import annotations def _snake_case ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: if len(lowerCamelCase__ ) == 0: return False lowerCamelCase_ : Dict =len(lowerCamelCase__ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , lowerCamelCase__ ) else: return binary_search(a_list[midpoint + 1 :] , lowerCamelCase__ ) if __name__ == "__main__": A__ : Tuple = input('Enter numbers separated by comma:\n').strip() A__ : Union[str, Any] = [int(item.strip()) for item in user_input.split(',')] A__ : Optional[Any] = int(input('Enter the number to be found in the list:\n').strip()) A__ : str = '' if binary_search(sequence, target) else 'not ' print(f'{target} was {not_str}found in {sequence}')
244
0
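The binary search above halves the slice recursively; the same logic restated with plain names (a sketch, assuming the input list is already sorted).

def binary_search(items, target):
    if len(items) == 0:
        return False
    midpoint = len(items) // 2
    if items[midpoint] == target:
        return True
    if target < items[midpoint]:
        return binary_search(items[:midpoint], target)
    return binary_search(items[midpoint + 1 :], target)


assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False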
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ ): if index == number_of_items: return 0 lowerCAmelCase__ : Any = 0 lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : List[str] = knapsack(_A , _A , _A , _A , index + 1 ) if weights[index] <= max_weight: lowerCAmelCase__ : Optional[int] = values[index] + knapsack( _A , _A , _A , max_weight - weights[index] , index + 1 ) return max(_A , _A ) if __name__ == "__main__": import doctest doctest.testmod()
450
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def UpperCAmelCase_ ( _A , _A , _A , _A , _A = None , _A = None , _A = None , ): '''simple docstring''' if config_name_or_path is None: SCREAMING_SNAKE_CASE__ = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base''' if generator_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE__ = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE__ = question_encoder_name_or_path SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration # Save model. SCREAMING_SNAKE_CASE__ = RagConfig.from_pretrained(_A ) SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(_A ) SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(_A ) SCREAMING_SNAKE_CASE__ = gen_config SCREAMING_SNAKE_CASE__ = question_encoder_config SCREAMING_SNAKE_CASE__ = model_class.from_pretrained_question_encoder_generator( _A , _A , config=_A ) rag_model.save_pretrained(_A ) # Sanity check. model_class.from_pretrained(_A ) # Save tokenizers. SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(_A ) gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' ) SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(_A ) question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument( '''--model_type''', choices=['''rag_sequence''', '''rag_token'''], required=True, type=str, help='''RAG model type: rag_sequence, rag_token''', ) parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''') parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''') parser.add_argument( '''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier''' ) parser.add_argument( '''--generator_tokenizer_name_or_path''', type=str, help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''', ) parser.add_argument( '''--question_encoder_tokenizer_name_or_path''', type=str, help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''', ) parser.add_argument( '''--config_name_or_path''', type=str, help=( '''Identifier of the model config to use, if not provided, resolves to a base config for a given''' ''' ``model_type``''' ), ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : str = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
493
0
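A hedged usage sketch for the recursive knapsack above; the weights, values and capacity are made-up sample data.

# 3 items, capacity 50: the optimum takes the 20- and 30-weight items.
weights = [10, 20, 30]
values = [60, 100, 120]
print(knapsack(weights, values, len(weights), 50, 0))  # 220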
"""Given any two of inductance, frequency and inductive reactance, compute the
missing quantity (pass the unknown as 0)."""
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
720
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex_pattern = r"\w+[.]\d+"
    pats = re.findall(regex_pattern, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
453
0
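A brief sketch of calling the reactance solver above (function name taken from the reconstruction); pass the unknown quantity as 0. The numbers are illustrative.

print(ind_reactance(35e-3, 1e3, 0))      # {'reactance': 219.91...} = 2*pi*1000*0.035
print(ind_reactance(0, 1e3, 219.911))    # {'inductance': ~0.035}
print(ind_reactance(35e-3, 0, 219.911))  # {'frequency': ~1000.0}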
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
523
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__) def UpperCamelCase__ ( _lowercase : Any=2 , _lowercase : str=3 , _lowercase : List[str]=1_6 , _lowercase : int = 1_0 , _lowercase : int = 2 ) -> str: def get_dataset(_lowercase : Optional[Any] ): __UpperCAmelCase: List[str] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(_lowercase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __UpperCAmelCase: Tuple = get_dataset(_lowercase ) __UpperCAmelCase: Dict = get_dataset(_lowercase ) __UpperCAmelCase: Dict = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 ) __UpperCAmelCase: Tuple = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 ) return (train_dataloader, valid_dataloader) def UpperCamelCase__ ( _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : int=None ) -> Optional[int]: __UpperCAmelCase: Optional[int] = [] for epoch in range(_lowercase ): # Train quickly model.train() for batch in dataloader: __UpperCAmelCase, __UpperCAmelCase: Tuple = batch __UpperCAmelCase: List[str] = model(_lowercase ) __UpperCAmelCase: List[Any] = torch.nn.functional.mse_loss(_lowercase , _lowercase ) accelerator.backward(_lowercase ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class a ( nn.Module ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__() __UpperCAmelCase: List[Any] = nn.Parameter(torch.randn(1 ) ) __UpperCAmelCase: List[Any] = nn.Parameter(torch.randn(1 ) ) def lowercase_ ( self , snake_case_ ): '''simple docstring''' return x * self.a + self.b class a ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCAmelCase: List[Any] = DummyModel() __UpperCAmelCase: List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = dummy_dataloaders() __UpperCAmelCase: Dict = ProjectConfiguration(total_limit=1 , project_dir=snake_case_ , automatic_checkpoint_naming=snake_case_ ) # Train baseline __UpperCAmelCase: Union[str, Any] = Accelerator(project_config=snake_case_ ) __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Tuple = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def lowercase_ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCAmelCase: Optional[int] = DummyModel() __UpperCAmelCase: List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase, __UpperCAmelCase: int = dummy_dataloaders() # Train baseline __UpperCAmelCase: Union[str, Any] = Accelerator() __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Dict = 
accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Save initial __UpperCAmelCase: int = os.path.join(snake_case_ , """initial""" ) accelerator.save_state(snake_case_ ) ((__UpperCAmelCase), (__UpperCAmelCase)): List[Any] = model.a.item(), model.b.item() __UpperCAmelCase: int = optimizer.state_dict() __UpperCAmelCase: Union[str, Any] = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ((__UpperCAmelCase), (__UpperCAmelCase)): Tuple = model.a.item(), model.b.item() __UpperCAmelCase: int = optimizer.state_dict() # Train partially set_seed(42 ) __UpperCAmelCase: str = DummyModel() __UpperCAmelCase: str = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = dummy_dataloaders() __UpperCAmelCase: Optional[Any] = Accelerator() __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Optional[Any] = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) accelerator.load_state(snake_case_ ) ((__UpperCAmelCase), (__UpperCAmelCase)): Any = model.a.item(), model.b.item() __UpperCAmelCase: int = optimizer.state_dict() self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) __UpperCAmelCase: Union[str, Any] = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Save everything __UpperCAmelCase: Optional[int] = os.path.join(snake_case_ , """checkpoint""" ) accelerator.save_state(snake_case_ ) # Load everything back in and make sure all states work accelerator.load_state(snake_case_ ) test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ((__UpperCAmelCase), (__UpperCAmelCase)): str = model.a.item(), model.b.item() __UpperCAmelCase: int = optimizer.state_dict() self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) def lowercase_ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCAmelCase: List[Any] = DummyModel() __UpperCAmelCase: List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase, __UpperCAmelCase: Tuple = dummy_dataloaders() __UpperCAmelCase: Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ ) # Train baseline __UpperCAmelCase: Any = Accelerator(project_dir=snake_case_ , project_config=snake_case_ ) __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Save initial accelerator.save_state() ((__UpperCAmelCase), (__UpperCAmelCase)): Optional[int] = model.a.item(), model.b.item() __UpperCAmelCase: int = optimizer.state_dict() __UpperCAmelCase: List[Any] = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ((__UpperCAmelCase), (__UpperCAmelCase)): List[Any] = model.a.item(), model.b.item() __UpperCAmelCase: str = optimizer.state_dict() # Train partially set_seed(42 ) __UpperCAmelCase: List[str] = DummyModel() __UpperCAmelCase: Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase, __UpperCAmelCase: Dict = dummy_dataloaders() __UpperCAmelCase: Optional[int] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=snake_case_ ) __UpperCAmelCase: Any = Accelerator(project_dir=snake_case_ , project_config=snake_case_ ) __UpperCAmelCase, __UpperCAmelCase, 
__UpperCAmelCase, __UpperCAmelCase: List[Any] = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) ) ((__UpperCAmelCase), (__UpperCAmelCase)): Union[str, Any] = model.a.item(), model.b.item() __UpperCAmelCase: Optional[Any] = optimizer.state_dict() self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) __UpperCAmelCase: List[str] = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_1""" ) ) test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ((__UpperCAmelCase), (__UpperCAmelCase)): str = model.a.item(), model.b.item() __UpperCAmelCase: Tuple = optimizer.state_dict() self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) def lowercase_ ( self ): '''simple docstring''' __UpperCAmelCase: Dict = torch.tensor([1, 2, 3] ) __UpperCAmelCase: Tuple = torch.tensor([2, 3, 4] ) __UpperCAmelCase: List[str] = DummyModel() __UpperCAmelCase: int = torch.optim.Adam(net.parameters() ) __UpperCAmelCase: str = Accelerator() with self.assertRaises(snake_case_ ) as ve: accelerator.register_for_checkpointing(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase: Tuple = str(ve.exception ) self.assertTrue("""Item at index 0""" in message ) self.assertTrue("""Item at index 1""" in message ) self.assertFalse("""Item at index 2""" in message ) self.assertFalse("""Item at index 3""" in message ) def lowercase_ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCAmelCase: List[str] = DummyModel() __UpperCAmelCase: int = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __UpperCAmelCase: Optional[int] = torch.optim.lr_scheduler.StepLR(snake_case_ , step_size=1 , gamma=0.9_9 ) __UpperCAmelCase, __UpperCAmelCase: Any = dummy_dataloaders() __UpperCAmelCase: Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ ) # Train baseline __UpperCAmelCase: List[Any] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ ) __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: List[Any] = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Save initial accelerator.save_state() __UpperCAmelCase: Union[str, Any] = scheduler.state_dict() train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) self.assertNotEqual(snake_case_ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) ) self.assertEqual(snake_case_ , scheduler.state_dict() ) def lowercase_ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCAmelCase: Optional[int] = DummyModel() __UpperCAmelCase: Tuple = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ , total_limit=2 ) # Train baseline __UpperCAmelCase: Any = Accelerator(project_dir=snake_case_ , project_config=snake_case_ ) __UpperCAmelCase: List[Any] = accelerator.prepare(snake_case_ ) # Save 3 states: 
for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_9""" ) ) ) self.assertTrue(os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_10""" ) ) ) @require_cuda def lowercase_ ( self ): '''simple docstring''' __UpperCAmelCase: Union[str, Any] = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case_ , env=os.environ.copy() ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = '/tmp/accelerate/state_checkpointing' SCREAMING_SNAKE_CASE_ = DummyModel() SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters(), lr=1E-3) SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dummy_dataloaders() SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: SCREAMING_SNAKE_CASE_ = group['params'][0].device break assert param_device.type == accelerator.device.type SCREAMING_SNAKE_CASE_ = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: SCREAMING_SNAKE_CASE_ = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: SCREAMING_SNAKE_CASE_ = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
523
1
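A minimal sketch instantiating the config class reconstructed above; it shows the derived num_layers and the hidden_size -> d_model attribute mapping.

config = GPTSanJapaneseConfig(d_model=512)
print(config.num_layers)   # 10 = num_switch_layers (10) + num_ext_layers (0)
print(config.hidden_size)  # 512, resolved to d_model via attribute_map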
"""Split a string on a separator without using ``str.split``."""


def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
126
"""Build and simulate a quantum half adder with Qiskit: qubit 2 holds the XOR
(sum bit) and qubit 3 holds the AND (carry bit) of the two inputs."""
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
126
1
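A sketch that checks the half adder above against the classical truth table; it assumes a local qiskit Aer install, and the returned bitstrings read '<carry><sum>' (clbit 1 then clbit 0).

for bit0 in (0, 1):
    for bit1 in (0, 1):
        counts = half_adder(bit0, bit1)
        expected = f"{bit0 & bit1}{bit0 ^ bit1}"
        print(bit0, bit1, counts, "expected:", expected)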
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowercase_ = False @skip_mps class UpperCAmelCase_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str = StableDiffusionAttendAndExcitePipeline UpperCamelCase_ : Dict = False UpperCamelCase_ : Dict = TEXT_TO_IMAGE_PARAMS UpperCamelCase_ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} ) UpperCamelCase_ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def a ( cls : str )-> Optional[int]: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(a_ ) @classmethod def a ( cls : Any )-> List[Any]: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(a_ ) def a ( self : List[str] )-> List[Any]: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , ) UpperCAmelCase_ : Tuple = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=a_ , set_alpha_to_one=a_ , ) torch.manual_seed(0 ) UpperCAmelCase_ : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) UpperCAmelCase_ : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) UpperCAmelCase_ : str = CLIPTextModel(a_ ) UpperCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase_ : Dict = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a ( self : Any , a_ : Any , a_ : Dict=0 )-> Any: """simple docstring""" if str(a_ ).startswith("""mps""" ): UpperCAmelCase_ : Tuple = torch.manual_seed(a_ ) else: UpperCAmelCase_ : str = torch.Generator(device=a_ ).manual_seed(a_ ) UpperCAmelCase_ : Optional[Any] = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def a ( self : Any )-> Dict: """simple docstring""" UpperCAmelCase_ : Dict = """cpu""" 
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components() UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) UpperCAmelCase_ : List[str] = self.get_dummy_inputs(a_ ) UpperCAmelCase_ : Optional[Any] = pipe(**a_ ).images UpperCAmelCase_ : str = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) UpperCAmelCase_ : Union[str, Any] = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) UpperCAmelCase_ : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_ , 1E-3 ) def a ( self : Any )-> List[Any]: """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def a ( self : List[str] )-> Tuple: """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def a ( self : List[str] )-> Any: """simple docstring""" self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def a ( self : Optional[int] )-> int: """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def a ( self : List[str] )-> Any: """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def a ( self : int )-> str: """simple docstring""" super().test_save_load_local(expected_max_difference=5E-4 ) def a ( self : Dict )-> str: """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def a ( cls : Optional[Any] )-> Union[str, Any]: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(a_ ) @classmethod def a ( cls : Any )-> List[Any]: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(a_ ) def a ( self : int )-> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : Optional[int] )-> Dict: """simple docstring""" UpperCAmelCase_ : Optional[int] = torch.manual_seed(51 ) UpperCAmelCase_ : Union[str, Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , safety_checker=a_ , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) UpperCAmelCase_ : Optional[Any] = """a painting of an elephant with glasses""" UpperCAmelCase_ : Any = [5, 7] UpperCAmelCase_ : Dict = pipe( prompt=a_ , token_indices=a_ , guidance_scale=7.5 , generator=a_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0] UpperCAmelCase_ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5E-1
470
"""simple docstring""" lowercase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" lowercase_ = [{"type": "code", "content": INSTALL_CONTENT}] lowercase_ = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
470
1
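The get_dummy_inputs helper in the test above encodes a device-aware seeding pattern worth isolating; make_generator is a hypothetical name used only for this sketch.

import torch

def make_generator(device: str, seed: int = 0):
    # MPS only supports the global seed; other devices take a bound Generator.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)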
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
605
'''simple docstring''' import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib __lowercase = threading.Lock() __lowercase = None __lowercase = { '''debug''': logging.DEBUG, '''info''': logging.INFO, '''warning''': logging.WARNING, '''error''': logging.ERROR, '''critical''': logging.CRITICAL, } __lowercase = logging.WARNING __lowercase = True def snake_case__ ( ) -> int: '''simple docstring''' lowerCAmelCase = os.getenv("""TRANSFORMERS_VERBOSITY""" , _A ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys() ) }" ) return _default_log_level def snake_case__ ( ) -> str: '''simple docstring''' return __name__.split(""".""" )[0] def snake_case__ ( ) -> logging.Logger: '''simple docstring''' return logging.getLogger(_get_library_name() ) def snake_case__ ( ) -> None: '''simple docstring''' global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return lowerCAmelCase = logging.StreamHandler() # Set sys.stderr as stream. lowerCAmelCase = sys.stderr.flush # Apply our default configuration to the library root logger. lowerCAmelCase = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) lowerCAmelCase = False def snake_case__ ( ) -> None: '''simple docstring''' global _default_handler with _lock: if not _default_handler: return lowerCAmelCase = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) lowerCAmelCase = None def snake_case__ ( ) -> Dict: '''simple docstring''' return log_levels def snake_case__ ( _A: Optional[str] = None ) -> logging.Logger: '''simple docstring''' if name is None: lowerCAmelCase = _get_library_name() _configure_library_root_logger() return logging.getLogger(_A ) def snake_case__ ( ) -> int: '''simple docstring''' _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def snake_case__ ( _A: int ) -> None: '''simple docstring''' _configure_library_root_logger() _get_library_root_logger().setLevel(_A ) def snake_case__ ( ) -> int: '''simple docstring''' return set_verbosity(_A ) def snake_case__ ( ) -> List[str]: '''simple docstring''' return set_verbosity(_A ) def snake_case__ ( ) -> Optional[int]: '''simple docstring''' return set_verbosity(_A ) def snake_case__ ( ) -> List[str]: '''simple docstring''' return set_verbosity(_A ) def snake_case__ ( ) -> None: '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def snake_case__ ( ) -> None: '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def snake_case__ ( _A: logging.Handler ) -> None: '''simple docstring''' _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(_A ) def snake_case__ ( _A: logging.Handler ) -> None: '''simple docstring''' _configure_library_root_logger() assert handler is not None and handler not 
in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(_A ) def snake_case__ ( ) -> None: '''simple docstring''' _configure_library_root_logger() lowerCAmelCase = False def snake_case__ ( ) -> None: '''simple docstring''' _configure_library_root_logger() lowerCAmelCase = True def snake_case__ ( ) -> None: '''simple docstring''' lowerCAmelCase = _get_library_root_logger().handlers for handler in handlers: lowerCAmelCase = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" ) handler.setFormatter(_A ) def snake_case__ ( ) -> None: '''simple docstring''' lowerCAmelCase = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(_A ) def snake_case__ ( self: str , *_A: Optional[int] , **_A: Dict ) -> str: '''simple docstring''' lowerCAmelCase = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , _A ) if no_advisory_warnings: return self.warning(*_A , **_A ) __lowercase = warning_advice @functools.lru_cache(_A ) def snake_case__ ( self: List[str] , *_A: List[Any] , **_A: str ) -> List[str]: '''simple docstring''' self.warning(*_A , **_A ) __lowercase = warning_once class a__: '''simple docstring''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase): # pylint: disable=unused-argument """simple docstring""" lowerCAmelCase = args[0] if args else None def __iter__( self): """simple docstring""" return iter(self._iterator) def __getattr__( self , __lowerCAmelCase): """simple docstring""" def empty_fn(*__lowerCAmelCase , **__lowerCAmelCase): # pylint: disable=unused-argument return return empty_fn def __enter__( self): """simple docstring""" return self def __exit__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" return class a__: '''simple docstring''' def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm(*__lowerCAmelCase , **__lowerCAmelCase) else: return EmptyTqdm(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self): """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() __lowercase = _tqdm_cls() def snake_case__ ( ) -> bool: '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def snake_case__ ( ) -> Dict: '''simple docstring''' global _tqdm_active lowerCAmelCase = True hf_hub_utils.enable_progress_bars() def snake_case__ ( ) -> Any: '''simple docstring''' global _tqdm_active lowerCAmelCase = False hf_hub_utils.disable_progress_bars()
605
1
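A short sketch of the verbosity helpers defined in the logging module above, assuming it is importable as transformers.utils.logging.

from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
logger.info("verbosity is now INFO")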
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) snake_case = logging.getLogger() snake_case = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def _A ( self : List[str] , UpperCAmelCase_ : int ): os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = {"source": "What is love ?", "target": "life"} SCREAMING_SNAKE_CASE : Tuple = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: SCREAMING_SNAKE_CASE : Dict = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(UpperCAmelCase_ , f'''{split}.{field}''' ) , "w" ) as f: f.write(UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : str = "pytorch" ): SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE : Any = os.path.join(UpperCAmelCase_ , "output" ) SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(UpperCAmelCase_ , "data" ) self._create_dummy_data(data_dir=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = f''' --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ '''.split() if gpus > 0: testargs.append(f'''--gpus={gpus}''' ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) SCREAMING_SNAKE_CASE : Any = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(UpperCAmelCase_ , env=self.get_env() ) SCREAMING_SNAKE_CASE : List[str] = os.path.join(UpperCAmelCase_ , "metrics.json" ) with open(UpperCAmelCase_ ) as f: SCREAMING_SNAKE_CASE : Dict = json.load(UpperCAmelCase_ ) return result @require_torch_gpu def _A ( self : int ): SCREAMING_SNAKE_CASE : Union[str, Any] = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Dict = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : List[str] = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def _A ( self : int ): SCREAMING_SNAKE_CASE : int = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
62
"""simple docstring""" import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( snake_case__, unittest.TestCase ): _UpperCAmelCase :Dict = DDIMPipeline _UpperCAmelCase :List[str] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _UpperCAmelCase :List[Any] = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "latents", "callback", "callback_steps", } _UpperCAmelCase :Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase :Tuple = False def UpperCAmelCase__ ( self : Optional[Any] ): torch.manual_seed(0 ) lowerCamelCase_ : Tuple =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) lowerCamelCase_ : Union[str, Any] =DDIMScheduler() lowerCamelCase_ : int ={"unet": unet, "scheduler": scheduler} return components def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Any=0 ): if str(snake_case__ ).startswith("mps" ): lowerCamelCase_ : Any =torch.manual_seed(snake_case__ ) else: lowerCamelCase_ : List[Any] =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) lowerCamelCase_ : List[Any] ={ "batch_size": 1, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def UpperCAmelCase__ ( self : Dict ): lowerCamelCase_ : List[Any] ="cpu" lowerCamelCase_ : List[Any] =self.get_dummy_components() lowerCamelCase_ : Union[str, Any] =self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCamelCase_ : Any =self.get_dummy_inputs(snake_case__ ) lowerCamelCase_ : List[str] =pipe(**snake_case__ ).images lowerCamelCase_ : Tuple =image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3) ) lowerCamelCase_ : Optional[Any] =np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] ) lowerCamelCase_ : Dict =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case__ , 1E-3 ) def UpperCAmelCase__ ( self : List[Any] ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def UpperCAmelCase__ ( self : Dict ): super().test_save_load_local(expected_max_difference=3E-3 ) def UpperCAmelCase__ ( self : str ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def UpperCAmelCase__ ( self : str ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ): lowerCamelCase_ : Any ="google/ddpm-cifar10-32" lowerCamelCase_ : List[Any] =UNetaDModel.from_pretrained(snake_case__ ) lowerCamelCase_ : str =DDIMScheduler() lowerCamelCase_ : Optional[int] =DDIMPipeline(unet=snake_case__ , scheduler=snake_case__ ) ddim.to(snake_case__ ) ddim.set_progress_bar_config(disable=snake_case__ ) lowerCamelCase_ : Optional[int] =torch.manual_seed(0 ) lowerCamelCase_ : str =ddim(generator=snake_case__ , eta=0.0 , output_type="numpy" ).images lowerCamelCase_ : int =image[0, -3:, -3:, -1] assert image.shape == 
(1, 32, 32, 3) lowerCamelCase_ : int =np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ : str ="google/ddpm-ema-bedroom-256" lowerCamelCase_ : Tuple =UNetaDModel.from_pretrained(snake_case__ ) lowerCamelCase_ : Dict =DDIMScheduler.from_pretrained(snake_case__ ) lowerCamelCase_ : str =DDIMPipeline(unet=snake_case__ , scheduler=snake_case__ ) ddpm.to(snake_case__ ) ddpm.set_progress_bar_config(disable=snake_case__ ) lowerCamelCase_ : int =torch.manual_seed(0 ) lowerCamelCase_ : Union[str, Any] =ddpm(generator=snake_case__ , output_type="numpy" ).images lowerCamelCase_ : Optional[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCamelCase_ : Tuple =np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
153
0
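A minimal sampling sketch mirroring the slow DDIM test above; it needs network access to fetch google/ddpm-cifar10-32 and is best run on a GPU.

import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]
print(image.shape)  # (32, 32, 3)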
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
10
1
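With the lazy __init__ reconstructed above, a plain import only materializes the swinv2 submodule on first attribute access; a quick sketch:

from transformers import Swinv2Config

config = Swinv2Config()
print(config.model_type)  # "swinv2"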
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ,unittest.TestCase ): _UpperCAmelCase : Tuple = CanineTokenizer _UpperCAmelCase : str = False def __lowerCamelCase ( self : List[Any] ) ->Optional[int]: super().setUp() lowerCamelCase__ : str = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __lowerCamelCase ( self : Union[str, Any] ) ->List[str]: return CanineTokenizer.from_pretrained('''google/canine-s''' ) def __lowerCamelCase ( self : Optional[Any] , **A : List[Any] ) ->CanineTokenizer: lowerCamelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ ) lowerCamelCase__ : Optional[Any] = 1_0_2_4 return tokenizer @require_torch def __lowerCamelCase ( self : str ) ->Any: lowerCamelCase__ : int = self.canine_tokenizer lowerCamelCase__ : str = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.'''] # fmt: off lowerCamelCase__ : str = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0] # fmt: on lowerCamelCase__ : Any = tokenizer(A_ , padding=A_ , return_tensors='''pt''' ) self.assertIsInstance(A_ , A_ ) lowerCamelCase__ : List[Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(A_ , A_ ) self.assertEqual((2, 3_9) , batch.input_ids.shape ) self.assertEqual((2, 3_9) , batch.attention_mask.shape ) @require_torch def __lowerCamelCase ( self : str ) ->str: lowerCamelCase__ : int = self.canine_tokenizer lowerCamelCase__ : Optional[int] = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.'''] lowerCamelCase__ : List[str] = tokenizer(A_ , padding=A_ , return_tensors='''pt''' ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn('''input_ids''' , A_ ) self.assertIn('''attention_mask''' , A_ ) self.assertIn('''token_type_ids''' , A_ ) @require_torch def __lowerCamelCase ( self : List[Any] ) ->int: lowerCamelCase__ : Optional[int] = self.canine_tokenizer lowerCamelCase__ : int = [ '''What\'s the weater?''', '''It\'s about 25 degrees.''', ] lowerCamelCase__ : Tuple = tokenizer( text_target=A_ , max_length=3_2 , padding='''max_length''' , truncation=A_ , return_tensors='''pt''' ) self.assertEqual(3_2 , targets['''input_ids'''].shape[1] ) def __lowerCamelCase ( self : List[Any] ) ->Tuple: # safety check on max_len default value so we are sure the test works lowerCamelCase__ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test lowerCamelCase__ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase__ : List[Any] = tempfile.mkdtemp() lowerCamelCase__ : str = ''' He is very happy, UNwant\u00E9d,running''' lowerCamelCase__ : List[Any] = tokenizer.encode(A_ , add_special_tokens=A_ ) tokenizer.save_pretrained(A_ ) lowerCamelCase__ : 
Union[str, Any] = tokenizer.__class__.from_pretrained(A_ ) lowerCamelCase__ : int = after_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) shutil.rmtree(A_ ) lowerCamelCase__ : Tuple = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase__ : List[str] = tempfile.mkdtemp() lowerCamelCase__ : Union[str, Any] = ''' He is very happy, UNwant\u00E9d,running''' lowerCamelCase__ : Union[str, Any] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: lowerCamelCase__ : str = chr(0xE_0_0_7 ) additional_special_tokens.append(A_ ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) lowerCamelCase__ : Any = tokenizer.encode(A_ , add_special_tokens=A_ ) tokenizer.save_pretrained(A_ ) lowerCamelCase__ : Optional[int] = tokenizer.__class__.from_pretrained(A_ ) lowerCamelCase__ : int = after_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) self.assertIn(A_ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) lowerCamelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(A_ , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(A_ ) def __lowerCamelCase ( self : Dict ) ->Optional[int]: lowerCamelCase__ : Optional[Any] = self.get_tokenizers(do_lower_case=A_ ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.get_clean_sequence(A_ ) # a special token for Canine can be defined as follows: lowerCamelCase__ : List[Any] = 0xE_0_0_5 lowerCamelCase__ : Tuple = chr(A_ ) tokenizer.add_special_tokens({'''cls_token''': special_token} ) lowerCamelCase__ : Optional[int] = tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertEqual(len(A_ ) , 1 ) lowerCamelCase__ : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=A_ ) lowerCamelCase__ : Any = tokenizer.encode(A_ , add_special_tokens=A_ ) lowerCamelCase__ : str = tokenizer.encode(A_ , add_special_tokens=A_ ) lowerCamelCase__ : List[Any] = tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertEqual(A_ , input_encoded + special_token_id ) lowerCamelCase__ : List[str] = tokenizer.decode(A_ , skip_special_tokens=A_ ) self.assertTrue(special_token not in decoded ) def __lowerCamelCase ( self : str ) ->Union[str, Any]: lowerCamelCase__ : List[str] = self.get_tokenizers(do_lower_case=A_ ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): lowerCamelCase__ : Optional[int] = chr(0xE_0_0_5 ) lowerCamelCase__ : int = chr(0xE_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=A_ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} ) lowerCamelCase__ : List[str] = tokenizer.tokenize(A_ ) lowerCamelCase__ : Dict = tokenizer.tokenize(A_ ) self.assertEqual(len(A_ ) , 1 ) self.assertEqual(len(A_ ) , 1 ) self.assertEqual(token_a[0] , A_ ) self.assertEqual(token_a[0] , A_ ) @require_tokenizers def __lowerCamelCase ( self : Optional[Any] ) ->Union[str, Any]: lowerCamelCase__ : Dict = self.get_tokenizers(do_lower_case=A_ ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # a special token for Canine can be defined as follows: lowerCamelCase__ : Dict = 0xE_0_0_6 lowerCamelCase__ : Dict = chr(A_ ) lowerCamelCase__ : List[Any] = AddedToken(A_ , lstrip=A_ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(A_ ) tokenizer.from_pretrained(A_ ) def __lowerCamelCase ( self : List[Any] ) ->int: lowerCamelCase__ : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(A_ ) with open(os.path.join(A_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: lowerCamelCase__ : Dict = json.load(A_ ) with open(os.path.join(A_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: lowerCamelCase__ : List[Any] = json.load(A_ ) # a special token for Canine can be defined as follows: lowerCamelCase__ : Optional[Any] = 0xE_0_0_6 lowerCamelCase__ : str = chr(A_ ) lowerCamelCase__ : Optional[int] = [new_token_a] lowerCamelCase__ : Optional[int] = [new_token_a] with open(os.path.join(A_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(A_ , A_ ) with open(os.path.join(A_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(A_ , A_ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowerCamelCase__ : Optional[int] = tokenizer_class.from_pretrained(A_ , extra_ids=0 ) self.assertIn(A_ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) lowerCamelCase__ : Dict = 0xE_0_0_7 lowerCamelCase__ : Any = chr(A_ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowerCamelCase__ : int = [AddedToken(A_ , lstrip=A_ )] lowerCamelCase__ : Tuple = tokenizer_class.from_pretrained( A_ , additional_special_tokens=A_ , extra_ids=0 ) self.assertIn(A_ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def __lowerCamelCase ( self : List[Any] ) ->int: lowerCamelCase__ : Optional[Any] = self.get_tokenizers(do_lower_case=A_ ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): lowerCamelCase__ : Any = '''hello world''' if self.space_between_special_tokens: lowerCamelCase__ : Optional[Any] = '''[CLS] hello world [SEP]''' else: lowerCamelCase__ : List[str] = input lowerCamelCase__ : Union[str, Any] = tokenizer.encode(A_ , add_special_tokens=A_ ) lowerCamelCase__ : Optional[Any] = tokenizer.decode(A_ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(A_ , [output, output.lower()] ) def __lowerCamelCase ( self : int ) ->Any: lowerCamelCase__ : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): lowerCamelCase__ : List[Any] = [ '''bos_token''', '''eos_token''', '''unk_token''', '''sep_token''', '''pad_token''', '''cls_token''', '''mask_token''', ] lowerCamelCase__ : List[str] = '''a''' lowerCamelCase__ : List[str] = ord(A_ ) for attr in attributes_list: setattr(A_ , attr + '''_id''' , A_ ) self.assertEqual(getattr(A_ , A_ ) , A_ ) self.assertEqual(getattr(A_ , attr + '''_id''' ) , A_ ) setattr(A_ , attr + '''_id''' , A_ ) self.assertEqual(getattr(A_ , A_ ) , A_ ) self.assertEqual(getattr(A_ , attr + '''_id''' ) , A_ ) setattr(A_ , '''additional_special_tokens_ids''' , [] ) self.assertListEqual(getattr(A_ , '''additional_special_tokens''' ) , [] ) self.assertListEqual(getattr(A_ , '''additional_special_tokens_ids''' ) , [] ) lowerCamelCase__ : List[str] = 0xE_0_0_6 lowerCamelCase__ : Tuple = chr(A_ ) setattr(A_ , '''additional_special_tokens_ids''' , [additional_special_token_id] ) self.assertListEqual(getattr(A_ , '''additional_special_tokens''' ) , [additional_special_token] ) self.assertListEqual(getattr(A_ , '''additional_special_tokens_ids''' ) , [additional_special_token_id] ) def __lowerCamelCase ( self : str ) ->Tuple: pass def __lowerCamelCase ( self : Dict ) ->Any: pass def __lowerCamelCase ( self : Tuple ) ->Union[str, Any]: pass def __lowerCamelCase ( self : Optional[int] ) ->Optional[int]: pass def __lowerCamelCase ( self : List[Any] ) ->Tuple: pass def __lowerCamelCase ( self : List[str] ) ->List[Any]: pass def __lowerCamelCase ( self : Union[str, Any] ) ->Any: 
pass def __lowerCamelCase ( self : Optional[int] ) ->List[Any]: pass
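For readers skimming the test above, here is a compact standalone sketch of the pattern it exercises: overriding additional_special_tokens at from_pretrained time with an AddedToken. The checkpoint name is an assumption for illustration.

from transformers import AddedToken, ByT5Tokenizer

new_token = chr(0xE007)  # private-use-area codepoint, as in the test
tok = ByT5Tokenizer.from_pretrained(
    "google/byt5-small",  # hypothetical choice of checkpoint
    extra_ids=0,
    additional_special_tokens=[AddedToken(new_token, lstrip=True)],
)
assert new_token in tok.additional_special_tokens
# the token round-trips through id conversion unchanged
assert tok.convert_ids_to_tokens(tok.convert_tokens_to_ids([new_token])) == [new_token]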
315
'''simple docstring''' import re import string import numpy as np import datasets UpperCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' UpperCAmelCase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' UpperCAmelCase = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case( datasets.Metric ): '''simple docstring''' def __snake_case ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , 
reference_urls=[] , ) def __snake_case ( self , A_ , A_ , A_=None , A_=False , A_=False , A_=False , ) -> List[Any]: if regexes_to_ignore is not None: for s in regexes_to_ignore: lowerCAmelCase = np.array([re.sub(A_ , """""" , A_ ) for x in predictions] ) lowerCAmelCase = np.array([re.sub(A_ , """""" , A_ ) for x in references] ) else: lowerCAmelCase = np.asarray(A_ ) lowerCAmelCase = np.asarray(A_ ) if ignore_case: lowerCAmelCase = np.char.lower(A_ ) lowerCAmelCase = np.char.lower(A_ ) if ignore_punctuation: lowerCAmelCase = string.punctuation.maketrans("""""" , """""" , string.punctuation ) lowerCAmelCase = np.char.translate(A_ , table=A_ ) lowerCAmelCase = np.char.translate(A_ , table=A_ ) if ignore_numbers: lowerCAmelCase = string.digits.maketrans("""""" , """""" , string.digits ) lowerCAmelCase = np.char.translate(A_ , table=A_ ) lowerCAmelCase = np.char.translate(A_ , table=A_ ) lowerCAmelCase = predictions == references return {"exact_match": np.mean(A_ ) * 100}
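A hedged sketch of the same normalization pipeline applied to a single pair, without the datasets wrapper, to make the order of operations in the compute method explicit:

import string
import numpy as np

preds = np.asarray(["The cat!"])
refs = np.asarray(["the cat"])
preds, refs = np.char.lower(preds), np.char.lower(refs)            # ignore_case
table = string.punctuation.maketrans("", "", string.punctuation)   # ignore_punctuation
preds, refs = np.char.translate(preds, table=table), np.char.translate(refs, table=table)
print({"exact_match": np.mean(preds == refs) * 100})               # {'exact_match': 100.0}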
433
0
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] = sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain of ``filter_type`` against frequency."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase shift of ``filter_type`` against frequency."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
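The two plotting helpers only require an object with a process(sample) method, matching the FilterType protocol. A minimal hedged example, a two-tap moving-average FIR filter:

class MovingAverage:
    """y[n] = 0.5 * x[n] + 0.5 * x[n - 1], a crude low-pass filter."""

    def __init__(self) -> None:
        self.prev = 0.0

    def process(self, sample: float) -> float:
        out = 0.5 * sample + 0.5 * self.prev
        self.prev = sample
        return out

# show_frequency_response(MovingAverage(), 48_000)  # gain falls off toward Nyquist
# show_phase_response(MovingAverage(), 48_000)      # roughly linear phase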
152
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class _lowerCamelCase ( UpperCamelCase ): """simple docstring""" snake_case = 42 snake_case = 42 snake_case = None class _lowerCamelCase ( UpperCamelCase , UpperCamelCase ): """simple docstring""" snake_case = 2 @register_to_config def __init__( self , _SCREAMING_SNAKE_CASE = 0.0_2 , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 1.0_0_7 , _SCREAMING_SNAKE_CASE = 80 , _SCREAMING_SNAKE_CASE = 0.0_5 , _SCREAMING_SNAKE_CASE = 50 , )->Optional[Any]: '''simple docstring''' A_ : Tuple = sigma_max # setable values A_ : int = None A_ : np.IntTensor = None A_ : torch.FloatTensor = None # sigma(t_i) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->torch.FloatTensor: '''simple docstring''' return sample def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->List[Any]: '''simple docstring''' A_ : int = num_inference_steps A_ : List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy() A_ : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) A_ : Any = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] A_ : Any = torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa , device=_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->Tuple[torch.FloatTensor, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: A_ : int = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: A_ : Tuple = 0 # sample eps ~ N(0, S_noise^2 * I) A_ : Tuple = self.config.s_noise * randn_tensor(sample.shape , generator=_SCREAMING_SNAKE_CASE ).to(sample.device ) A_ : Any = sigma + gamma * sigma A_ : int = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , )->Union[KarrasVeOutput, Tuple]: '''simple docstring''' A_ : Dict = sample_hat + sigma_hat * model_output A_ : Optional[int] = (sample_hat - pred_original_sample) / sigma_hat A_ : Dict = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=_SCREAMING_SNAKE_CASE , derivative=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , )->Union[KarrasVeOutput, Tuple]: '''simple docstring''' A_ : Any = sample_prev + sigma_prev * model_output A_ : str = (sample_prev - pred_original_sample) / sigma_prev A_ : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=_SCREAMING_SNAKE_CASE , derivative=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Tuple: '''simple docstring''' raise 
NotImplementedError()
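A hedged sketch of the sampling loop this scheduler is designed to drive, mirroring the upstream Karras-VE pipeline in diffusers; denoiser is a hypothetical callable that returns a model output already scaled the way the scheduler expects:

import torch

def karras_ve_sample(scheduler, denoiser, shape=(1, 3, 32, 32)):
    sample = torch.randn(shape) * scheduler.config.sigma_max
    scheduler.set_timesteps(num_inference_steps=50)
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        # 1) perturb the sample up to sigma_hat with fresh noise
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        # 2) Euler step from sigma_hat down to sigma_prev
        step = scheduler.step(denoiser(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
        # 3) second-order (Heun) correction via step_correct, as defined above
        if sigma_prev != 0:
            step = scheduler.step_correct(
                denoiser(step.prev_sample, sigma_prev),
                sigma_hat, sigma_prev, sample_hat,
                step.prev_sample, step.derivative,
            )
        sample = step.prev_sample
    return sample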
152
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Union[str, Any] = { """configuration_x_clip""": [ """XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XCLIPConfig""", """XCLIPTextConfig""", """XCLIPVisionConfig""", ], """processing_x_clip""": ["""XCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """XCLIPModel""", """XCLIPPreTrainedModel""", """XCLIPTextModel""", """XCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
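A short note on the pattern above: _LazyModule makes the submodule imports happen on first attribute access, so importing transformers stays cheap and the torch-dependent classes only load when touched. A hedged illustration:

import transformers

cfg = transformers.XCLIPConfig()      # pulls in only the configuration module
model = transformers.XCLIPModel(cfg)  # first touch imports the modeling code (needs torch)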
671
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Dict = logging.get_logger(__name__) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = b.T SCREAMING_SNAKE_CASE_: Dict = np.sum(np.square(_UpperCAmelCase ) , axis=1 ) SCREAMING_SNAKE_CASE_: Tuple = np.sum(np.square(_UpperCAmelCase ) , axis=0 ) SCREAMING_SNAKE_CASE_: List[Any] = np.matmul(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Dict = aa[:, None] - 2 * ab + ba[None, :] return d def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: int = x.reshape(-1 , 3 ) SCREAMING_SNAKE_CASE_: Tuple = squared_euclidean_distance(_UpperCAmelCase , _UpperCAmelCase ) return np.argmin(_UpperCAmelCase , axis=1 ) class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : int = ['''pixel_values'''] def __init__( self : Tuple , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ): super().__init__(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = size if size is not None else {"height": 256, "width": 256} SCREAMING_SNAKE_CASE_: Tuple = get_size_dict(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = np.array(lowerCAmelCase__) if clusters is not None else None SCREAMING_SNAKE_CASE_: Dict = do_resize SCREAMING_SNAKE_CASE_: str = size SCREAMING_SNAKE_CASE_: List[Any] = resample SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize SCREAMING_SNAKE_CASE_: Dict = do_color_quantize def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ): SCREAMING_SNAKE_CASE_: List[str] = get_size_dict(lowerCAmelCase__) if "height" not in size or "width" not in size: raise ValueError(F"Size dictionary must contain both height and width keys. 
Got {size.keys()}") return resize( lowerCAmelCase__ , size=(size["height"], size["width"]) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , ): SCREAMING_SNAKE_CASE_: str = rescale(image=lowerCAmelCase__ , scale=1 / 127.5 , data_format=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = image - 1 return image def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowerCAmelCase__ : Union[str, Any] , ): SCREAMING_SNAKE_CASE_: Tuple = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_: Optional[int] = size if size is not None else self.size SCREAMING_SNAKE_CASE_: Dict = get_size_dict(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_: int = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_: List[str] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize SCREAMING_SNAKE_CASE_: Tuple = clusters if clusters is not None else self.clusters SCREAMING_SNAKE_CASE_: Optional[int] = np.array(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True.") # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE_: Union[str, Any] = [to_numpy_array(lowerCAmelCase__) for image in images] if do_resize: SCREAMING_SNAKE_CASE_: Optional[Any] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images] if do_normalize: SCREAMING_SNAKE_CASE_: str = [self.normalize(image=lowerCAmelCase__) for image in images] if do_color_quantize: SCREAMING_SNAKE_CASE_: Any = [to_channel_dimension_format(lowerCAmelCase__ , ChannelDimension.LAST) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) SCREAMING_SNAKE_CASE_: List[Any] = np.array(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = color_quantize(lowerCAmelCase__ , lowerCAmelCase__).reshape(images.shape[:-1]) # flatten to (batch_size, height*width) SCREAMING_SNAKE_CASE_: str = images.shape[0] SCREAMING_SNAKE_CASE_: Tuple = images.reshape(lowerCAmelCase__ , -1) # We need to convert back to a list of images to keep consistent behaviour across processors. 
SCREAMING_SNAKE_CASE_: str = list(lowerCAmelCase__) else: SCREAMING_SNAKE_CASE_: Dict = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images] SCREAMING_SNAKE_CASE_: Optional[Any] = {"input_ids": images} return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__)
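A self-contained, hedged sketch of the nearest-cluster color quantization the processor performs (toy random palette; the real processor ships learned clusters):

import numpy as np

rng = np.random.default_rng(0)
clusters = rng.uniform(-1, 1, size=(512, 3))            # (n_clusters, 3) palette
pixels = rng.uniform(-1, 1, size=(32, 32, 3)).reshape(-1, 3)

# squared euclidean distance via ||a||^2 - 2ab + ||b||^2, as in the module helpers
d = (
    np.sum(pixels**2, axis=1)[:, None]
    - 2 * pixels @ clusters.T
    + np.sum(clusters**2, axis=1)[None, :]
)
input_ids = np.argmin(d, axis=1)                        # one palette index per pixel
print(input_ids.shape)                                  # (1024,): the flattened tokens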
671
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : int = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class UpperCAmelCase__ ( A__ ): """simple docstring""" a = "deta" a = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : str , __lowerCamelCase : int=None , __lowerCamelCase : List[str]=900 , __lowerCamelCase : int=2048 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Optional[int]=2048 , __lowerCamelCase : Optional[int]=8 , __lowerCamelCase : str=6 , __lowerCamelCase : int=1024 , __lowerCamelCase : Any=8 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Any="relu" , __lowerCamelCase : int=256 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : int=0.02 , __lowerCamelCase : Union[str, Any]=1.0 , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : List[Any]="sine" , __lowerCamelCase : Union[str, Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : int=4 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=300 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Optional[Any]=5 , __lowerCamelCase : str=2 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Dict=5 , __lowerCamelCase : int=2 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : List[str]=0.25 , **__lowerCamelCase : Any , ) -> List[Any]: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) SCREAMING_SNAKE_CASE__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE__ = backbone_config.pop('''model_type''' ) SCREAMING_SNAKE_CASE__ = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE__ = config_class.from_dict(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = backbone_config SCREAMING_SNAKE_CASE__ = num_queries SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = d_model SCREAMING_SNAKE_CASE__ = encoder_ffn_dim SCREAMING_SNAKE_CASE__ = encoder_layers SCREAMING_SNAKE_CASE__ = encoder_attention_heads SCREAMING_SNAKE_CASE__ = decoder_ffn_dim SCREAMING_SNAKE_CASE__ = decoder_layers SCREAMING_SNAKE_CASE__ = decoder_attention_heads SCREAMING_SNAKE_CASE__ = dropout SCREAMING_SNAKE_CASE__ = attention_dropout SCREAMING_SNAKE_CASE__ = activation_dropout SCREAMING_SNAKE_CASE__ = activation_function SCREAMING_SNAKE_CASE__ = init_std SCREAMING_SNAKE_CASE__ = init_xavier_std SCREAMING_SNAKE_CASE__ = encoder_layerdrop SCREAMING_SNAKE_CASE__ = auxiliary_loss SCREAMING_SNAKE_CASE__ = position_embedding_type # deformable attributes SCREAMING_SNAKE_CASE__ = num_feature_levels SCREAMING_SNAKE_CASE__ = encoder_n_points SCREAMING_SNAKE_CASE__ = decoder_n_points SCREAMING_SNAKE_CASE__ = two_stage SCREAMING_SNAKE_CASE__ = two_stage_num_proposals SCREAMING_SNAKE_CASE__ = with_box_refine SCREAMING_SNAKE_CASE__ = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher SCREAMING_SNAKE_CASE__ = class_cost SCREAMING_SNAKE_CASE__ = bbox_cost SCREAMING_SNAKE_CASE__ = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE__ = mask_loss_coefficient SCREAMING_SNAKE_CASE__ = dice_loss_coefficient SCREAMING_SNAKE_CASE__ = bbox_loss_coefficient SCREAMING_SNAKE_CASE__ = giou_loss_coefficient SCREAMING_SNAKE_CASE__ = eos_coefficient SCREAMING_SNAKE_CASE__ = focal_alpha super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def lowercase_ ( self : Dict ) -> int: return self.encoder_attention_heads @property def lowercase_ ( self : Optional[int] ) -> int: return self.d_model def lowercase_ ( self : Union[str, Any] ) -> Dict: SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE__ = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE__ = self.__class__.model_type return output
721
def get_data(source_data: list) -> list:
    """Split rows of raw data into one list per column, as floats."""
    data_lists: list = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list, weights: list) -> list:
    score_lists: list = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list) -> list:
    final_scores: list = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list, weights: list) -> list:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
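Hedged usage sketch for the weighted min-max scoring above; weight 0 means "smaller is better" and weight 1 means "larger is better":

vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]  # price, mileage, year
for row in procentual_proximity(vehicles, [0, 0, 1]):        # cheap, low-mileage, recent
    print(row)  # each row gains its combined score as a trailing element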
472
0
from ....configuration_utils import PretrainedConfig from ....utils import logging SCREAMING_SNAKE_CASE :Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Any = { 'Visual-Attention-Network/van-base': ( 'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json' ), } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "van" def __init__( self : int ,A : str=2_24 ,A : int=3 ,A : Optional[Any]=[7, 3, 3, 3] ,A : Tuple=[4, 2, 2, 2] ,A : Union[str, Any]=[64, 1_28, 3_20, 5_12] ,A : Optional[int]=[3, 3, 12, 3] ,A : Optional[int]=[8, 8, 4, 4] ,A : Any="gelu" ,A : Union[str, Any]=0.02 ,A : Union[str, Any]=1E-6 ,A : Dict=1E-2 ,A : str=0.0 ,A : Optional[int]=0.0 ,**A : List[Any] ,): super().__init__(**A ) __A = image_size __A = num_channels __A = patch_sizes __A = strides __A = hidden_sizes __A = depths __A = mlp_ratios __A = hidden_act __A = initializer_range __A = layer_norm_eps __A = layer_scale_init_value __A = drop_path_rate __A = dropout_rate
55
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __magic_name__ : Optional[int] = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : List[str] = ['''pixel_values'''] def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ): super().__init__(**lowerCamelCase ) _snake_case = size if size is not None else {"shortest_edge": 256} _snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase ) _snake_case = crop_size if crop_size is not None else {"height": 224, "width": 224} _snake_case = get_size_dict(lowerCamelCase ) _snake_case = do_resize _snake_case = size _snake_case = resample _snake_case = do_center_crop _snake_case = crop_size _snake_case = do_rescale _snake_case = rescale_factor _snake_case = do_normalize _snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ): _snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) _snake_case = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase ) return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ): _snake_case = get_size_dict(lowerCamelCase ) return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase ) def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ): return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ): return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase ) def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ): _snake_case = do_resize if do_resize is not None else self.do_resize _snake_case = size if size is not None else self.size _snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase ) _snake_case = resample if resample is not None else self.resample _snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop _snake_case = crop_size if crop_size is not None else self.crop_size _snake_case = get_size_dict(lowerCamelCase ) _snake_case = do_rescale if do_rescale is not None else self.do_rescale _snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor _snake_case = do_normalize if do_normalize is not None else self.do_normalize _snake_case = image_mean if image_mean is not None else self.image_mean _snake_case = image_std if image_std is not None else self.image_std _snake_case = make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
_snake_case = [to_numpy_array(lowerCamelCase ) for image in images] if do_resize: _snake_case = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images] if do_center_crop: _snake_case = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images] if do_rescale: _snake_case = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images] if do_normalize: _snake_case = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images] _snake_case = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images] _snake_case = {"pixel_values": images} return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
672
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _UpperCAmelCase : Tuple = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConfig""", """AltCLIPTextConfig""", """AltCLIPVisionConfig""", ], """processing_altclip""": ["""AltCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Optional[int] = [ """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """AltCLIPPreTrainedModel""", """AltCLIPModel""", """AltCLIPTextModel""", """AltCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys _UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
706
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    r"""
    The sample tree:
            1
           / \
          2   3
         / \
        4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of levels in the tree; 0 for an empty tree."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Any]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Any]:
    """Values of one level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Any]:
    """Values of one level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Any] | list[Any]:
    """Zig-zag (spiral) level-order traversal."""
    if root is None:
        return []
    output: list[Any] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
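A hedged companion sketch: the same in-order traversal done iteratively with an explicit stack, which sidesteps Python's recursion limit on very deep (degenerate) trees:

def inorder_iterative(root: Node | None) -> list[int]:
    output: list[int] = []
    stack: list[Node] = []
    node = root
    while stack or node:
        while node:               # walk to the leftmost unvisited node
            stack.append(node)
            node = node.left
        node = stack.pop()
        output.append(node.data)  # visit
        node = node.right         # then handle the right subtree
    return output

assert inorder_iterative(make_tree()) == inorder(make_tree())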
108
0
"""Merge sort: a stable O(n log n) divide-and-conquer sort."""


def merge_sort(collection: list) -> list:
    """Sort ``collection`` in ascending order and return the result.

    >>> merge_sort([5, 3, 1, 4, 2])
    [1, 2, 3, 4, 5]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
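One detail worth calling out: the <= comparison in _merge pops from the left half on ties, which is what makes the sort stable. Quick sanity checks:

assert merge_sort([]) == []
assert merge_sort([7]) == [7]
assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]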
405
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of ``function`` in ``[a, b]`` by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
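Because the bracket halves every iteration, reaching the 1e-7 tolerance takes roughly log2((b - a) / 1e-7) iterations regardless of the function. Another hedged usage sketch:

import math

root = bisection(lambda x: math.cos(x) - x, 0, 2)  # the Dottie number
print(round(root, 6))  # ~0.739085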
27
0
'''simple docstring''' import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def __lowerCamelCase ( _UpperCamelCase : Tuple ): '''simple docstring''' UpperCAmelCase_ = os.path.join(args.tf_model_dir , '''parameters.json''' ) UpperCAmelCase_ = json.loads(open(_UpperCamelCase ).read() ) if not params: raise ValueError( F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith('''.pt''' ): UpperCAmelCase_ = args.output + '''.pt''' UpperCAmelCase_ = OrderedDict() with tf.device('''/CPU:0''' ): UpperCAmelCase_ = tf.train.load_checkpoint(args.tf_model_dir ) UpperCAmelCase_ = reader.get_variable_to_shape_map() for key_name in shapes.keys(): UpperCAmelCase_ = reader.get_tensor(_UpperCamelCase ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): UpperCAmelCase_ = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): UpperCAmelCase_ = 8 UpperCAmelCase_ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.startswith('''model/moe''' ): UpperCAmelCase_ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): UpperCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.endswith('''/softmlp/kernel''' ): UpperCAmelCase_ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): UpperCAmelCase_ = key_name[-9:-7] for i in range(16 ): UpperCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) UpperCAmelCase_ = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.startswith('''model/mlp''' ): UpperCAmelCase_ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): UpperCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.endswith('''/p1/bias''' ): UpperCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player UpperCAmelCase_ = vnp.copy() # same because it is one dimensional UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.endswith('''/p2/kernel''' ): UpperCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.endswith('''/p2/bias''' ): UpperCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player UpperCAmelCase_ = vnp.copy() # same because it is one dimensional UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.startswith('''model/ln''' ): 
UpperCAmelCase_ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): UpperCAmelCase_ = '''model.blocks.%d.feed_forward.norm.bias''' % player UpperCAmelCase_ = vnp.copy() # same because it is one dimensional UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.endswith('''/g''' ): UpperCAmelCase_ = '''model.blocks.%d.feed_forward.norm.weight''' % player UpperCAmelCase_ = vnp.copy() # same because it is one dimensional UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.startswith('''model/att''' ): UpperCAmelCase_ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): UpperCAmelCase_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum UpperCAmelCase_ = state[:, 0, :, :] UpperCAmelCase_ = state[:, 1, :, :] UpperCAmelCase_ = state[:, 2, :, :] UpperCAmelCase_ = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) UpperCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) UpperCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.endswith('''/o/kernel''' ): UpperCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player UpperCAmelCase_ = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.startswith('''model/an''' ): UpperCAmelCase_ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): UpperCAmelCase_ = '''model.blocks.%d.self_attn.norm.bias''' % player UpperCAmelCase_ = vnp.copy() # same because it is one dimensional UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.endswith('''/g''' ): UpperCAmelCase_ = '''model.blocks.%d.self_attn.norm.weight''' % player UpperCAmelCase_ = vnp.copy() # same because it is one dimensional UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): UpperCAmelCase_ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] UpperCAmelCase_ = '''model.%s.weight''' % nlayer UpperCAmelCase_ = vnp.copy() # same in embedded UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) if key_name.startswith('''model/wte''' ): UpperCAmelCase_ = '''lm_head.weight''' UpperCAmelCase_ = vnp.copy() # same in embedded UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name.startswith('''model/wob''' ): UpperCAmelCase_ = '''final_logits_bias''' UpperCAmelCase_ = vnp.copy() # same in embedded UpperCAmelCase_ = state.reshape((1, -1) ) UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name == "model/dense/kernel": UpperCAmelCase_ = '''model.last_project.weight''' UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # 
Mesh-Tensorflow is a diagonal matrix UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) elif key_name == "model/dense_1/bias": UpperCAmelCase_ = '''model.last_project.bias''' UpperCAmelCase_ = vnp.copy() # same because it is one dimensional UpperCAmelCase_ = torch.tensor(_UpperCamelCase ) torch.save(_UpperCamelCase , args.output ) if __name__ == "__main__": lowercase__ : int = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") lowercase__ : List[str] = parser.parse_args() convert_tf_gptsan_to_pt(args)
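A hedged invocation sketch for the converter above; the script filename and paths are placeholders. The script expects a parameters.json inside --tf_model_dir, and appends ".pt" to --output if it is missing:

# python convert_tf_gptsan_to_pt.py \
#     --tf_model_dir /path/to/tf_checkpoint_dir \
#     --output /path/to/pytorch_model.pt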
708
"""Convert strings between naming conventions (Pascal, camel, snake, kebab)."""

import re


def split_input(str_: str) -> list:
    """Split on any non-alphanumeric character, then on whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
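Hedged usage sketch for the converters above:

print(to_pascal_case("hello world"))        # HelloWorld
print(to_camel_case("hello world"))         # helloWorld
print(to_snake_case("hello world", True))   # HELLO_WORLD
print(to_kebab_case("hello world", False))  # hello-world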
43
0
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCamelCase : def __init__(self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=2 , lowerCamelCase__=9_9 , lowerCamelCase__=0 , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__="last" , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=0 , ): """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_lengths A__ = use_token_type_ids A__ = use_labels A__ = gelu_activation A__ = sinusoidal_embeddings A__ = causal A__ = asm A__ = n_langs A__ = vocab_size A__ = n_special A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = summary_type A__ = use_proj A__ = scope A__ = bos_token_id def A (self ): """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_input_lengths: A__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ = ids_tensor([self.batch_size] , 2 ).float() A__ = ids_tensor([self.batch_size] , self.num_choices ) A__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def A (self ): """simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , 
bos_token_id=self.bos_token_id , ) def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ): """simple docstring""" A__ = XLMModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ = model(lowerCamelCase__ , lengths=lowerCamelCase__ , langs=lowerCamelCase__ ) A__ = model(lowerCamelCase__ , langs=lowerCamelCase__ ) A__ = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ): """simple docstring""" A__ = XLMWithLMHeadModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ): """simple docstring""" A__ = XLMForQuestionAnsweringSimple(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ = model(lowerCamelCase__ ) A__ = model(lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ ) A__ = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ): """simple docstring""" A__ = XLMForQuestionAnswering(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ = model(lowerCamelCase__ ) A__ = model( lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , cls_index=lowerCamelCase__ , is_impossible=lowerCamelCase__ , p_mask=lowerCamelCase__ , ) A__ = model( lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , cls_index=lowerCamelCase__ , is_impossible=lowerCamelCase__ , ) ((A__) ,) = result_with_labels.to_tuple() A__ = model(lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ ) ((A__) ,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ): """simple docstring""" A__ = XLMForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ = 
        model(lowerCamelCase__ )
        A__ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
        """simple docstring"""
        A__ = self.num_labels
        A__ = XLMForTokenClassification(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()

        A__ = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
        """simple docstring"""
        A__ = self.num_choices
        A__ = XLMForMultipleChoice(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()

        A__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        A__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        A__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        A__ = model(
            lowerCamelCase__ ,
            attention_mask=lowerCamelCase__ ,
            token_type_ids=lowerCamelCase__ ,
            labels=lowerCamelCase__ ,
        )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def A (self ):
        """simple docstring"""
        A__ = self.prepare_config_and_inputs()
        (
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
        ) = config_and_inputs

        A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
        return config, inputs_dict


@require_torch
class _UpperCamelCase ( __snake_case , __snake_case , __snake_case , unittest.TestCase):
    __lowerCamelCase = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    __lowerCamelCase = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    __lowerCamelCase = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
        """simple docstring"""
        A__ = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                A__ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
                A__ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )

        return inputs_dict

    def A (self ):
        """simple docstring"""
        A__ = XLMModelTester(self )
        A__ = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=3_7 )

    def A (self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def A (self ):
        """simple docstring"""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*lowerCamelCase__ )

    def A (self ):
        """simple docstring"""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase__ )

    def A (self ):
        """simple docstring"""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase__ )

    def A (self ):
        """simple docstring"""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*lowerCamelCase__ )

    def A (self ):
        """simple docstring"""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase__ )

    def A (self ):
        """simple docstring"""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase__ )

    def A (self ):
        """simple docstring"""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase__ )

    def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=1 ):
        """simple docstring"""
        self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
        self.assertListEqual(
            [isinstance(lowerCamelCase__ , lowerCamelCase__ ) for iter_attentions in attentions] , [True] * len(lowerCamelCase__ ) )
        self.assertEqual(len(lowerCamelCase__ ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_attentions in enumerate(lowerCamelCase__ ):
            # adds PAD dummy token
            A__ = min_length + idx + 1
            A__ = min_length + idx + 1

            A__ = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase__ ) )

    def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=1 ):
        """simple docstring"""
        self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
        self.assertListEqual(
            [isinstance(lowerCamelCase__ , lowerCamelCase__ ) for iter_hidden_states in hidden_states] ,
            [True] * len(lowerCamelCase__ ) ,
        )
        self.assertEqual(len(lowerCamelCase__ ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_hidden_states in enumerate(lowerCamelCase__ ):
            # adds PAD dummy token
            A__ = min_length + idx + 1
            A__ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,
                [expected_shape] * len(lowerCamelCase__ ) ,
            )
        pass

    @slow
    def A (self ):
        """simple docstring"""
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ = XLMModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )


@require_torch
class _UpperCamelCase ( unittest.TestCase):
    @slow
    def A (self ):
        """simple docstring"""
        A__ = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
        model.to(lowerCamelCase__ )

        A__ = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCamelCase__ )  # the president
        A__ = [
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference

        A__ = model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCamelCase__ )
574
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py lowerCamelCase__ = "src/diffusers" # Matches is_xxx_available() lowerCamelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)") # Matches from xxx import bla lowerCamelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") lowerCamelCase__ = "\n{0} = None\n" lowerCamelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n" lowerCamelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" def _SCREAMING_SNAKE_CASE ( UpperCamelCase : Tuple ): A__ = _re_backend.findall(UpperCamelCase ) if len(UpperCamelCase ) == 0: return None return "_and_".join(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( ): with open(os.path.join(UpperCamelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: A__ = f.readlines() # Get to the point we do the actual imports for type checking A__ = 0 A__ = {} # Go through the end of the file while line_index < len(UpperCamelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block A__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 A__ = [] # Until we unindent, add backend objects to the list while line_index < len(UpperCamelCase ) and len(lines[line_index] ) > 1: A__ = lines[line_index] A__ = _re_single_line_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(UpperCamelCase ) > 0: A__ = objects else: line_index += 1 return backend_specific_objects def _SCREAMING_SNAKE_CASE ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] ): if name.isupper(): return DUMMY_CONSTANT.format(UpperCamelCase ) elif name.islower(): return DUMMY_FUNCTION.format(UpperCamelCase , UpperCamelCase ) else: return DUMMY_CLASS.format(UpperCamelCase , UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase : Any=None ): if backend_specific_objects is None: A__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename A__ = {} for backend, objects in backend_specific_objects.items(): A__ = """[""" + """, """.join(F"""\"{b}\"""" for b in backend.split("""_and_""" ) ) + """]""" A__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(UpperCamelCase , UpperCamelCase ) for o in objects] ) A__ = dummy_file return dummy_files def _SCREAMING_SNAKE_CASE ( UpperCamelCase : List[str]=False ): A__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py A__ = {"""torch""": """pt"""} # Locate actual dummy modules and read their content. 
A__ = os.path.join(UpperCamelCase , """utils""" ) A__ = { backend: os.path.join(UpperCamelCase , F"""dummy_{short_names.get(UpperCamelCase , UpperCamelCase )}_objects.py""" ) for backend in dummy_files.keys() } A__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(UpperCamelCase ): with open(UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: A__ = f.read() else: A__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F"""Updating diffusers.utils.dummy_{short_names.get(UpperCamelCase , UpperCamelCase )}_objects.py as the main """ """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F"""diffusers.utils.dummy_{short_names.get(UpperCamelCase , UpperCamelCase )}_objects.py. Run `make fix-copies` """ """to fix this.""" ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") lowerCamelCase__ = parser.parse_args() check_dummies(args.fix_and_overwrite)
574
1
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
UpperCAmelCase__ : Optional[int] =logging.get_logger(__name__)

UpperCAmelCase__ : Optional[Any] =[
    ['''attention''', '''attn'''],
    ['''encoder_attention''', '''encoder_attn'''],
    ['''q_lin''', '''q_proj'''],
    ['''k_lin''', '''k_proj'''],
    ['''v_lin''', '''v_proj'''],
    ['''out_lin''', '''out_proj'''],
    ['''norm_embeddings''', '''layernorm_embedding'''],
    ['''position_embeddings''', '''embed_positions'''],
    ['''embeddings''', '''embed_tokens'''],
    ['''ffn.lin''', '''fc'''],
]


def _lowercase ( _UpperCAmelCase ) -> int:
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        lowerCamelCase =k.replace(_UpperCAmelCase , _UpperCAmelCase )

    if k.startswith("""encoder""" ):
        lowerCamelCase =k.replace(""".attn""" , """.self_attn""" )
        lowerCamelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
        lowerCamelCase =k.replace("""norm2""" , """final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        lowerCamelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
        lowerCamelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" )
        lowerCamelCase =k.replace("""norm3""" , """final_layer_norm""" )
    return k


def _lowercase ( _UpperCAmelCase ) -> Optional[Any]:
    lowerCamelCase =[
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        lowerCamelCase =sd.pop(_UpperCAmelCase )
        lowerCamelCase =k.replace("""layernorm_embedding""" , """layer_norm""" )
        assert new_k not in sd
        lowerCamelCase =v


UpperCAmelCase__ : int =['''START''']


@torch.no_grad()
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
    lowerCamelCase =torch.load(_UpperCAmelCase , map_location="""cpu""" )
    lowerCamelCase =model["""model"""]
    lowerCamelCase =BlenderbotConfig.from_json_file(_UpperCAmelCase )
    lowerCamelCase =BlenderbotForConditionalGeneration(_UpperCAmelCase )
    lowerCamelCase =m.model.state_dict().keys()
    lowerCamelCase =[]
    lowerCamelCase ={}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        lowerCamelCase =rename_state_dict_key(_UpperCAmelCase )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            lowerCamelCase =v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(_UpperCAmelCase )
    m.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
    m.half()
    m.save_pretrained(_UpperCAmelCase )


if __name__ == "__main__":
    UpperCAmelCase__ : Union[str, Any] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
    parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
    parser.add_argument(
        '''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
    )
    UpperCAmelCase__ : List[str] =parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
705
from math import sqrt


def _lowercase ( _UpperCAmelCase ) -> int:
    lowerCamelCase =0
    for i in range(1 , int(sqrt(_UpperCAmelCase ) + 1 ) ):
        if n % i == 0 and i != sqrt(_UpperCAmelCase ):
            total += i + n // i
        elif i == sqrt(_UpperCAmelCase ):
            total += i
    return total - n


def _lowercase ( _UpperCAmelCase = 1_00_00 ) -> int:
    lowerCamelCase =sum(
        i
        for i in range(1 , _UpperCAmelCase )
        if sum_of_divisors(sum_of_divisors(_UpperCAmelCase ) ) == i and sum_of_divisors(_UpperCAmelCase ) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
269
0
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ ): _lowerCamelCase : str = 2 _lowerCamelCase : List[Any] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(lowercase__ ) if n > 1: factors.append(lowercase__ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
630
"""simple docstring""" import re def _snake_case ( lowercase__ ): if len(re.findall('[ATCG]' , lowercase__ ) ) != len(lowercase__ ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
630
1
'''simple docstring'''


def __UpperCamelCase( _A : int , _A : list[int] , _A : int ):
    '''simple docstring'''

    def count_of_possible_combinations(_A : int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )

    return count_of_possible_combinations(_A )


def __UpperCamelCase( _A : int , _A : list[int] , _A : int ):
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(
        _A : int , _A : list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        UpperCAmelCase__ : List[Any] = sum(
            count_of_possible_combinations_with_dp_array(target - item , _A )
            for item in array )
        UpperCAmelCase__ : str = answer
        return answer

    UpperCAmelCase__ : Optional[Any] = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(_A , _A )


def __UpperCamelCase( _A : int , _A : list[int] , _A : int ):
    '''simple docstring'''
    UpperCAmelCase__ : Any = [0] * (target + 1)
    UpperCAmelCase__ : Any = 1

    for i in range(1 , target + 1 ):
        for j in range(_A ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    UpperCamelCase__ : Optional[Any] = 3
    UpperCamelCase__ : Optional[int] = 5
    UpperCamelCase__ : Tuple = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
496
'''simple docstring'''
import re


def __UpperCamelCase( _A : str ):
    '''simple docstring'''
    if len(re.findall('''[ATCG]''' , _A ) ) != len(_A ):
        raise ValueError('''Invalid Strand''' )

    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
496
1