Dataset schema (five fields per record, shown in order below):

    field                      dtype     range
    code                       string    lengths 82 to 53.2k
    code_codestyle             int64     0 to 721
    style_context              string    lengths 91 to 41.9k
    style_context_codestyle    int64     0 to 699
    label                      int64     0 to 1
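Each record below is one row of this dataset: a code snippet, its style id, a second snippet (the style context) with its own style id, and a binary label. As a minimal loading sketch, assuming the rows live in a Hugging Face Hub dataset — the repository id here is a hypothetical placeholder, since the dump does not name the dataset:

# A minimal sketch, not a documented loader. "user/codestyle-pairs" is a
# hypothetical placeholder repo id. Requires: pip install datasets
from datasets import load_dataset

ds = load_dataset("user/codestyle-pairs", split="train")

for record in ds.select(range(3)):
    # Print the two style ids and the binary label of each record.
    print(record["code_codestyle"], record["style_context_codestyle"], record["label"])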
from itertools import permutations


def is_substring_divisible(num) -> bool:
    """Return True if the digit tuple satisfies the substring-divisibility tests."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all pandigital numbers over the digits 0..n-1 that pass the tests."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 487
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer _lowerCamelCase : Any = logging.get_logger(__name__) class __snake_case (_a ): lowerCAmelCase__ = "AutoTokenizer" lowerCAmelCase__ = ["tokenizer"] lowerCAmelCase__ = { "semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2, } def __init__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int=None ) -> str: '''simple docstring''' super().__init__(_UpperCAmelCase ) _lowerCAmelCase : List[Any] = speaker_embeddings @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[str]="speaker_embeddings_path.json" , **_UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if speaker_embeddings_dict_path is not None: _lowerCAmelCase : Union[str, Any] = get_file_from_repo( _UpperCAmelCase , _UpperCAmelCase , subfolder=kwargs.pop("""subfolder""" , _UpperCAmelCase ) , cache_dir=kwargs.pop("""cache_dir""" , _UpperCAmelCase ) , force_download=kwargs.pop("""force_download""" , _UpperCAmelCase ) , proxies=kwargs.pop("""proxies""" , _UpperCAmelCase ) , resume_download=kwargs.pop("""resume_download""" , _UpperCAmelCase ) , local_files_only=kwargs.pop("""local_files_only""" , _UpperCAmelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , _UpperCAmelCase ) , revision=kwargs.pop("""revision""" , _UpperCAmelCase ) , ) if speaker_embeddings_path is None: logger.warning( f"`{os.path.join(_UpperCAmelCase , _UpperCAmelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." 
) _lowerCAmelCase : Union[str, Any] = None else: with open(_UpperCAmelCase ) as speaker_embeddings_json: _lowerCAmelCase : List[Any] = json.load(_UpperCAmelCase ) else: _lowerCAmelCase : Union[str, Any] = None _lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) return cls(tokenizer=_UpperCAmelCase , speaker_embeddings=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : int="speaker_embeddings_path.json" , _UpperCAmelCase : List[str]="speaker_embeddings" , _UpperCAmelCase : bool = False , **_UpperCAmelCase : Union[str, Any] , ) -> int: '''simple docstring''' if self.speaker_embeddings is not None: os.makedirs(os.path.join(_UpperCAmelCase , _UpperCAmelCase , """v2""" ) , exist_ok=_UpperCAmelCase ) _lowerCAmelCase : Any = {} _lowerCAmelCase : List[str] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": _lowerCAmelCase : List[str] = self._load_voice_preset(_UpperCAmelCase ) _lowerCAmelCase : Tuple = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , _UpperCAmelCase , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=_UpperCAmelCase , ) _lowerCAmelCase : str = os.path.join(_UpperCAmelCase , f"{prompt_key}_{key}.npy" ) _lowerCAmelCase : int = tmp_dict with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """w""" ) as fp: json.dump(_UpperCAmelCase , _UpperCAmelCase ) super().save_pretrained(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : str = None , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _lowerCAmelCase : Tuple = self.speaker_embeddings[voice_preset] _lowerCAmelCase : str = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." ) _lowerCAmelCase : Tuple = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , _UpperCAmelCase ) , cache_dir=kwargs.pop("""cache_dir""" , _UpperCAmelCase ) , force_download=kwargs.pop("""force_download""" , _UpperCAmelCase ) , proxies=kwargs.pop("""proxies""" , _UpperCAmelCase ) , resume_download=kwargs.pop("""resume_download""" , _UpperCAmelCase ) , local_files_only=kwargs.pop("""local_files_only""" , _UpperCAmelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , _UpperCAmelCase ) , revision=kwargs.pop("""revision""" , _UpperCAmelCase ) , ) if path is None: raise ValueError( f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." ) _lowerCAmelCase : int = np.load(_UpperCAmelCase ) return voice_preset_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : Optional[dict] = None ) -> Optional[Any]: '''simple docstring''' for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f"Voice preset unrecognized, missing {key} as a key." ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." 
) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) def __call__( self : List[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any="pt" , _UpperCAmelCase : List[str]=256 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : str=True , _UpperCAmelCase : int=False , **_UpperCAmelCase : List[str] , ) -> str: '''simple docstring''' if voice_preset is not None and not isinstance(_UpperCAmelCase , _UpperCAmelCase ): if ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): _lowerCAmelCase : List[Any] = self._load_voice_preset(_UpperCAmelCase ) else: if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not voice_preset.endswith(""".npz""" ): _lowerCAmelCase : Union[str, Any] = voice_preset + """.npz""" _lowerCAmelCase : int = np.load(_UpperCAmelCase ) if voice_preset is not None: self._validate_voice_preset_dict(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase : Tuple = BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase ) _lowerCAmelCase : Any = self.tokenizer( _UpperCAmelCase , return_tensors=_UpperCAmelCase , padding="""max_length""" , max_length=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , ) if voice_preset is not None: _lowerCAmelCase : Optional[int] = voice_preset return encoded_text
style_context_codestyle: 429
label: 0
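In this first record the two style ids differ (487 vs 429) and the label is 0; in every later record of this dump, matching ids carry label 1. That pattern suggests, though no dataset card confirms it, that the label simply marks whether both snippets come from the same code style:

# Assumption: label == 1 iff the two style ids match. Consistent with every
# record visible in this dump, but not documented anywhere in it.
def expected_label(record: dict) -> int:
    return int(record["code_codestyle"] == record["style_context_codestyle"])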
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCAmelCase_ : __lowerCamelCase : Dict = XGLMConfig __lowerCamelCase : int = {} __lowerCamelCase : Dict = "gelu" def __init__( self , _lowerCAmelCase , _lowerCAmelCase=14 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=0.02 , ) -> Union[str, Any]: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = d_model _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = ffn_dim _lowerCAmelCase = activation_function _lowerCAmelCase = activation_dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = initializer_range _lowerCAmelCase = None _lowerCAmelCase = 0 _lowerCAmelCase = 2 _lowerCAmelCase = 1 def _snake_case ( self ) -> Optional[int]: return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _snake_case ( self ) -> int: _lowerCAmelCase = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = self.get_config() _lowerCAmelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _snake_case ( self ) -> Tuple: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_lowerCAmelCase , ) def _snake_case ( self ) -> str: _lowerCAmelCase = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs _lowerCAmelCase = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): __lowerCamelCase : Optional[int] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __lowerCamelCase : str = (TFXGLMForCausalLM,) if is_tf_available() else () __lowerCamelCase : List[str] = ( {"feature-extraction": TFXGLMModel, "text-generation": 
TFXGLMForCausalLM} if is_tf_available() else {} ) __lowerCamelCase : Optional[int] = False __lowerCamelCase : List[str] = False __lowerCamelCase : Any = False def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = TFXGLMModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 ) def _snake_case ( self ) -> Optional[int]: self.config_tester.run_common_tests() @slow def _snake_case ( self ) -> List[str]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = TFXGLMModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _snake_case ( self ) -> List[Any]: super().test_resize_token_embeddings() @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def _snake_case ( self , _lowerCAmelCase=True ) -> Any: _lowerCAmelCase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _lowerCAmelCase = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: on _lowerCAmelCase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCAmelCase ) @slow def _snake_case ( self ) -> str: _lowerCAmelCase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _lowerCAmelCase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) _lowerCAmelCase = tokenizer("Today is a nice day and" , return_tensors="tf" ) _lowerCAmelCase = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): _lowerCAmelCase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase , seed=[7, 0] ) _lowerCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) @slow def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _lowerCAmelCase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _lowerCAmelCase = "left" # use different length sentences to test batching _lowerCAmelCase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="tf" , padding=_lowerCAmelCase ) _lowerCAmelCase = inputs["input_ids"] _lowerCAmelCase = model.generate(input_ids=_lowerCAmelCase , attention_mask=inputs["attention_mask"] , max_new_tokens=12 ) _lowerCAmelCase = tokenizer(sentences[0] , return_tensors="tf" ).input_ids _lowerCAmelCase = model.generate(input_ids=_lowerCAmelCase , max_new_tokens=12 ) _lowerCAmelCase = tokenizer(sentences[1] , return_tensors="tf" ).input_ids _lowerCAmelCase = model.generate(input_ids=_lowerCAmelCase , max_new_tokens=12 ) _lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCAmelCase ) _lowerCAmelCase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , [non_padded_sentence, padded_sentence] )
code_codestyle: 489
'''simple docstring''' _SCREAMING_SNAKE_CASE = range(2, 20 + 1) _SCREAMING_SNAKE_CASE = [10**k for k in range(ks[-1] + 1)] _SCREAMING_SNAKE_CASE = {} def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' _lowerCAmelCase = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) ) _lowerCAmelCase = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) ) _lowerCAmelCase , _lowerCAmelCase = 0, 0 _lowerCAmelCase = n - i _lowerCAmelCase = memo.get(SCREAMING_SNAKE_CASE_ ) if sub_memo is not None: _lowerCAmelCase = sub_memo.get(SCREAMING_SNAKE_CASE_ ) if jumps is not None and len(SCREAMING_SNAKE_CASE_ ) > 0: # find and make the largest jump without going over _lowerCAmelCase = -1 for _k in range(len(SCREAMING_SNAKE_CASE_ ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: _lowerCAmelCase = _k break if max_jump >= 0: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = jumps[max_jump] # since the difference between jumps is cached, add c _lowerCAmelCase = diff + c for j in range(min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) ): _lowerCAmelCase , _lowerCAmelCase = divmod(SCREAMING_SNAKE_CASE_ , 10 ) if new_c > 0: add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: _lowerCAmelCase = [] else: _lowerCAmelCase = {c: []} _lowerCAmelCase = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps _lowerCAmelCase , _lowerCAmelCase = next_term(SCREAMING_SNAKE_CASE_ , k - 1 , i + dn , SCREAMING_SNAKE_CASE_ ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead _lowerCAmelCase , _lowerCAmelCase = compute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , i + dn , SCREAMING_SNAKE_CASE_ ) diff += _diff dn += terms_jumped _lowerCAmelCase = sub_memo[c] # keep jumps sorted by # of terms skipped _lowerCAmelCase = 0 while j < len(SCREAMING_SNAKE_CASE_ ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(SCREAMING_SNAKE_CASE_ , (diff, dn, k) ) return (diff, dn) def __a(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): '''simple docstring''' if i >= n: return 0, i if k > len(SCREAMING_SNAKE_CASE_ ): a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE_ ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) _lowerCAmelCase = i _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0, 0, 0 for j in range(len(SCREAMING_SNAKE_CASE_ ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 _lowerCAmelCase = ds_c + ds_b diff += addend _lowerCAmelCase = 0 for j in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = a_i[j] + addend _lowerCAmelCase , _lowerCAmelCase = divmod(SCREAMING_SNAKE_CASE_ , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return diff, i - start_i def __a(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ): '''simple docstring''' for j in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ): _lowerCAmelCase = digits[j] + addend if s >= 10: _lowerCAmelCase , _lowerCAmelCase = divmod(SCREAMING_SNAKE_CASE_ , 10 ) 
_lowerCAmelCase = addend // 10 + quotient else: _lowerCAmelCase = s _lowerCAmelCase = addend // 10 if addend == 0: break while addend > 0: _lowerCAmelCase , _lowerCAmelCase = divmod(SCREAMING_SNAKE_CASE_ , 10 ) digits.append(SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : int = 10**15 ): '''simple docstring''' _lowerCAmelCase = [1] _lowerCAmelCase = 1 _lowerCAmelCase = 0 while True: _lowerCAmelCase , _lowerCAmelCase = next_term(SCREAMING_SNAKE_CASE_ , 20 , i + dn , SCREAMING_SNAKE_CASE_ ) dn += terms_jumped if dn == n - i: break _lowerCAmelCase = 0 for j in range(len(SCREAMING_SNAKE_CASE_ ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(f'''{solution() = }''')
style_context_codestyle: 489
label: 1
"""simple docstring""" import math def _lowerCamelCase ( UpperCAmelCase_ : float, UpperCAmelCase_ : float ) -> float: """simple docstring""" if initial_intensity < 0: raise ValueError("The value of intensity cannot be negative" ) # handling of negative values of initial intensity if angle < 0 or angle > 360: raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name="""malus_law""")
code_codestyle: 104
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } SCREAMING_SNAKE_CASE = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]: for attribute in key.split("." ): UpperCAmelCase_ = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if weight_type is not None: UpperCAmelCase_ = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).shape else: UpperCAmelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase_ = value elif weight_type == "weight_g": UpperCAmelCase_ = value elif weight_type == "weight_v": UpperCAmelCase_ = value elif weight_type == "bias": UpperCAmelCase_ = value else: UpperCAmelCase_ = value logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict: UpperCAmelCase_ = [] UpperCAmelCase_ = fairseq_model.state_dict() UpperCAmelCase_ = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase_ = False if "conv_layers" in name: load_conv_layer( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == "group" , ) UpperCAmelCase_ = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key): # special case since naming is very similar continue UpperCAmelCase_ = True if "*" in mapped_key: UpperCAmelCase_ = name.split(__SCREAMING_SNAKE_CASE )[0].split("." 
)[-2] UpperCAmelCase_ = mapped_key.replace("*" , __SCREAMING_SNAKE_CASE ) if "weight_g" in name: UpperCAmelCase_ = "weight_g" elif "weight_v" in name: UpperCAmelCase_ = "weight_v" elif "bias" in name: UpperCAmelCase_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase_ = "weight" else: UpperCAmelCase_ = None set_recursively(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(__SCREAMING_SNAKE_CASE ) logger.warning(f'''Unused weights: {unused_weights}''' ) def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any: UpperCAmelCase_ = full_name.split("conv_layers." )[-1] UpperCAmelCase_ = name.split("." ) UpperCAmelCase_ = int(items[0] ) UpperCAmelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCAmelCase_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__SCREAMING_SNAKE_CASE ) @torch.no_grad() def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: if config_path is not None: UpperCAmelCase_ = UniSpeechSatConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ = UniSpeechSatConfig() UpperCAmelCase_ = "" if is_finetuned: UpperCAmelCase_ = UniSpeechSatForCTC(__SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ = UniSpeechSatForPreTraining(__SCREAMING_SNAKE_CASE ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) UpperCAmelCase_ = model[0].eval() recursively_load_weights(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE = argparse.ArgumentParser() 
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) SCREAMING_SNAKE_CASE = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
style_context_codestyle: 579
label: 0
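A quick sanity check of the Malus-law snippet in the record above (`malus_law` is the de-obfuscated name, taken from the doctest module name): the law gives I = I0 * cos^2(theta), so a polarizer at 45 degrees passes exactly half the intensity.

# Worked check of Malus's law with the function above.
print(malus_law(100.0, 0.0))   # 100.0: polarizer aligned with the beam
print(malus_law(100.0, 45.0))  # 50.0 (up to float error): cos(45 deg)**2 == 0.5
print(malus_law(100.0, 90.0))  # ~0.0: crossed polarizers block the light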
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |-  |--| |\  /| |-")
    print(r"|/ \| |- |_ |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
code_codestyle: 383
from manim import * class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __a ( self :Optional[int] ): UpperCamelCase__ :Union[str, Any] = Rectangle(height=0.5 , width=0.5 ) UpperCamelCase__ :int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCamelCase__ :Dict = [mem.copy() for i in range(6 )] UpperCamelCase__ :Any = [mem.copy() for i in range(6 )] UpperCamelCase__ :List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :Dict = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :Union[str, Any] = Text("""CPU""" , font_size=24 ) UpperCamelCase__ :str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase__ ) UpperCamelCase__ :List[str] = [mem.copy() for i in range(1 )] UpperCamelCase__ :Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :Optional[Any] = Text("""GPU""" , font_size=24 ) UpperCamelCase__ :Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) gpu.align_to(lowerCamelCase__ , lowerCamelCase__ ) gpu.set_x(gpu.get_x() - 1 ) self.add(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = [mem.copy() for i in range(6 )] UpperCamelCase__ :Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :str = Text("""Model""" , font_size=24 ) UpperCamelCase__ :Optional[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) model.move_to([3, -1.0, 0] ) self.play( Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , ) UpperCamelCase__ :Tuple = MarkupText( f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , ) UpperCamelCase__ :Union[str, Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCamelCase__ :Tuple = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase__ , run_time=2.5 ) , Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) ) self.add(lowerCamelCase__ ) UpperCamelCase__ :Any = [] UpperCamelCase__ :List[Any] = [] UpperCamelCase__ :int = [] for i, rect in enumerate(lowerCamelCase__ ): UpperCamelCase__ :int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 ) cpu_target.move_to(lowerCamelCase__ ) cpu_target.generate_target() UpperCamelCase__ :Any = 0.46 / 4 UpperCamelCase__ :Optional[Any] = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase__ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0 ) cpu_targs.append(lowerCamelCase__ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) ) second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) ) self.play(*lowerCamelCase__ ) self.play(*lowerCamelCase__ ) 
self.wait()
style_context_codestyle: 383
label: 1
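For a feel of what the record's diamond printer produces — under the restored names above, and ignoring trailing spaces — calling pretty_print(3) draws a six-row diamond:

# Expected console output of pretty_print(3) with the functions above:
#   *
#  * *
# * * *
# * * *
#  * *
#   *
pretty_print(3)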
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
code_codestyle: 386
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed a_ : Any = 'true' def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16): set_seed(42) SCREAMING_SNAKE_CASE = RegressionModel() SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase) SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase) SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase) model.to(accelerator.device) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase) return model, ddp_model, dataloader def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased') SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation') def tokenize_function(_UpperCAmelCase): SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase) return outputs with accelerator.main_process_first(): SCREAMING_SNAKE_CASE = dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels') def collate_fn(_UpperCAmelCase): if use_longest: return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt') return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt') return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase) SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches) SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [] for batch in dataloader: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], [] for logit, targ in logits_and_targets: logits.append(_UpperCAmelCase) targs.append(_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase) return logits, targs def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 
generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) assert ( len(_UpperCAmelCase) == num_samples ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}''' def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False): SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc') SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase) # First do baseline SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no'] model.to(_UpperCAmelCase) model.eval() for batch in dataloader: batch.to(_UpperCAmelCase) with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels']) SCREAMING_SNAKE_CASE = metric.compute() # Then do distributed SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1) SCREAMING_SNAKE_CASE = batch['labels'] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase) SCREAMING_SNAKE_CASE = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**') for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''') test_mrpc(_UpperCAmelCase , _UpperCAmelCase) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**') for split_batches in [True, False]: for dispatch_batches in [True, False]: SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase) if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''') test_torch_metrics(_UpperCAmelCase , 99) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**') SCREAMING_SNAKE_CASE = Accelerator() test_torch_metrics(_UpperCAmelCase , 512) accelerator.state._reset_state() def lowerCamelCase__ (_UpperCAmelCase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
style_context_codestyle: 73
label: 0
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self , a , a = 13 , a = 64 , a = 2 , a = 3 , a = 3 , a = True , a = True , a = 1_28 , a=[16, 32, 64, 1_28] , a = 7 , a = 4 , a = 37 , a = "gelu" , a = 0.1 , a = 0.1 , a = 10 , a = 0.02 , a = 2 , a = 1 , a = 1_28 , a = [2, 2, 2, 2] , a = 2 , a = 2 , ) -> Tuple: snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = encoder_stride snake_case_ = num_attention_outputs snake_case_ = embed_dim snake_case_ = embed_dim + 1 snake_case_ = resolution snake_case_ = depths snake_case_ = hidden_sizes snake_case_ = dim snake_case_ = mlp_expansion_ratio def _UpperCamelCase ( self ) -> Union[str, Any]: snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = self.get_config() return config, pixel_values, labels def _UpperCamelCase ( self ) -> Optional[int]: return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def _UpperCamelCase ( self , a , a , a ) -> Optional[Any]: snake_case_ = TFEfficientFormerModel(config=a ) snake_case_ = model(a , training=a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self , a , a , a ) -> Dict: snake_case_ = self.type_sequence_label_size snake_case_ = TFEfficientFormerForImageClassification(a ) snake_case_ = model(a , labels=a , training=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale 
images snake_case_ = 1 snake_case_ = TFEfficientFormerForImageClassification(a ) snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase ( self ) -> int: snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) lowerCAmelCase = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _UpperCamelCase ( self ) -> int: snake_case_ = TFEfficientFormerModelTester(self ) snake_case_ = ConfigTester( self , config_class=a , has_text_modality=a , hidden_size=37 ) def _UpperCamelCase ( self ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds' ) def _UpperCamelCase ( self ) -> str: pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings' ) def _UpperCamelCase ( self ) -> Dict: pass def _UpperCamelCase ( self ) -> Optional[Any]: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(a ) snake_case_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _UpperCamelCase ( self ) -> List[str]: def check_hidden_states_output(a , a , a ): snake_case_ = model_class(a ) snake_case_ = model(**self._prepare_for_class(a , a ) , training=a ) snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case_ = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(a ) , a ) if hasattr(self.model_tester , 'encoder_seq_length' ): snake_case_ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1: snake_case_ = seq_length * self.model_tester.chunk_length else: snake_case_ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: snake_case_ = outputs.decoder_hidden_states self.asseretIsInstance(a , (list, tuple) ) self.assertEqual(len(a ) , a ) snake_case_ = getattr(self.model_tester , 'seq_length' , a ) snake_case_ = getattr(self.model_tester , 'decoder_seq_length' , a ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work 
using config del inputs_dict["output_hidden_states"] snake_case_ = True check_hidden_states_output(a , a , a ) def _UpperCamelCase ( self , a , a , a=False ) -> List[Any]: snake_case_ = super()._prepare_for_class(a , a , return_labels=a ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _UpperCamelCase ( self ) -> int: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' ) def _UpperCamelCase ( self ) -> Optional[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a ) def _UpperCamelCase ( self ) -> str: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def _UpperCamelCase ( self ) -> Union[str, Any]: for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = TFEfficientFormerModel.from_pretrained(a ) self.assertIsNotNone(a ) def _UpperCamelCase ( self ) -> str: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = True snake_case_ = getattr(self.model_tester , 'seq_length' , a ) snake_case_ = getattr(self.model_tester , 'encoder_seq_length' , a ) snake_case_ = getattr(self.model_tester , 'key_length' , a ) snake_case_ = getattr(self.model_tester , 'chunk_length' , a ) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ): snake_case_ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: snake_case_ = True snake_case_ = False snake_case_ = True snake_case_ = model_class(a ) snake_case_ = model(**self._prepare_for_class(a , a ) , training=a ) snake_case_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ = True snake_case_ = model_class(a ) snake_case_ = model(**self._prepare_for_class(a , a ) , training=a ) snake_case_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def _UpperCamelCase ( self ) -> List[str]: # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model snake_case_ = model_class(a ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes snake_case_ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=a ) for key, val in model.input_signature.items() if key in model.dummy_inputs } snake_case_ = model(a ) self.assertTrue(outputs_dict is not None ) def __UpperCAmelCase 
( ): snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _UpperCamelCase ( self ) -> int: return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' ) if is_vision_available() else None ) @slow def _UpperCamelCase ( self ) -> int: snake_case_ = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=a , return_tensors='tf' ) # forward pass snake_case_ = model(**a , training=a ) # verify the logits snake_case_ = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , a ) snake_case_ = tf.constant([-0.0_555, 0.4_825, -0.0_852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) @slow def _UpperCamelCase ( self ) -> Dict: snake_case_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300' ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=a , return_tensors='tf' ) # forward pass snake_case_ = model(**a , training=a ) # verify the logits snake_case_ = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , a ) snake_case_ = tf.constant([-0.1_312, 0.4_353, -1.0_499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
code_codestyle: 607
def sum_of_divisors(input_num: int) -> int:
    # Sums the proper divisors of input_num (the number itself is excluded,
    # since the range stops at input_num // 2).
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 607
label: 1
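A short usage check for the helper in the record above (`sum_of_divisors` is a descriptive name chosen during de-obfuscation; the dump does not record the original): the proper divisors of a perfect number sum back to the number itself.

# Only proper divisors are counted, so a perfect number maps to itself.
print(sum_of_divisors(6))   # 6  (1 + 2 + 3): 6 is a perfect number
print(sum_of_divisors(12))  # 16 (1 + 2 + 3 + 4 + 6)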
'''simple docstring'''
from . import __version__

# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
    CLOUDFRONT_DISTRIB_PREFIX,
    CONFIG_NAME,
    DISABLE_TELEMETRY,
    DUMMY_INPUTS,
    DUMMY_MASK,
    ENV_VARS_TRUE_AND_AUTO_VALUES,
    ENV_VARS_TRUE_VALUES,
    FEATURE_EXTRACTOR_NAME,
    FLAX_WEIGHTS_NAME,
    HF_MODULES_CACHE,
    HUGGINGFACE_CO_PREFIX,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    MODEL_CARD_NAME,
    MULTIPLE_CHOICE_DUMMY_INPUTS,
    PYTORCH_PRETRAINED_BERT_CACHE,
    PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX,
    SENTENCEPIECE_UNDERLINE,
    SPIECE_UNDERLINE,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    TORCH_FX_REQUIRED_VERSION,
    TRANSFORMERS_CACHE,
    TRANSFORMERS_DYNAMIC_MODULE_NAME,
    USE_JAX,
    USE_TF,
    USE_TORCH,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    ContextManagers,
    DummyObject,
    EntryNotFoundError,
    ExplicitEnum,
    ModelOutput,
    PaddingStrategy,
    PushToHubMixin,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    TensorType,
    _LazyModule,
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    cached_property,
    copy_func,
    default_cache_path,
    define_sagemaker_information,
    get_cached_models,
    get_file_from_repo,
    get_full_repo_name,
    get_torch_version,
    has_file,
    http_user_agent,
    is_apex_available,
    is_bs4_available,
    is_coloredlogs_available,
    is_datasets_available,
    is_detectron2_available,
    is_faiss_available,
    is_flax_available,
    is_ftfy_available,
    is_in_notebook,
    is_ipex_available,
    is_librosa_available,
    is_offline_mode,
    is_onnx_available,
    is_pandas_available,
    is_phonemizer_available,
    is_protobuf_available,
    is_psutil_available,
    is_py3nvml_available,
    is_pyctcdecode_available,
    is_pytesseract_available,
    is_pytorch_quantization_available,
    is_rjieba_available,
    is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled,
    is_scipy_available,
    is_sentencepiece_available,
    is_seqio_available,
    is_sklearn_available,
    is_soundfile_availble,
    is_spacy_available,
    is_speech_available,
    is_tensor,
    is_tensorflow_probability_available,
    is_tf2onnx_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torch_bf16_available,
    is_torch_cuda_available,
    is_torch_fx_available,
    is_torch_fx_proxy,
    is_torch_mps_available,
    is_torch_tf32_available,
    is_torch_tpu_available,
    is_torchaudio_available,
    is_training_run_on_sagemaker,
    is_vision_available,
    replace_return_docstrings,
    requires_backends,
    to_numpy,
    to_py_obj,
    torch_only_method,
)
236
"""Convert an ALBERT TF checkpoint to a PyTorch model."""
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
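A hedged usage sketch; the paths below are hypothetical placeholders, not values from the script.

# Example invocation:
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin
import torch

state_dict = torch.load("./albert_base/pytorch_model.bin", map_location="cpu")
print(f"{len(state_dict)} tensors converted")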
236
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _lowerCamelCase( ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png''' SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert('RGB' ) return image def _lowerCamelCase( lowerCAmelCase__ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def _lowerCamelCase( lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = dct.pop(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = val def _lowerCamelCase( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any ): '''simple 
docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases SCREAMING_SNAKE_CASE_ : Optional[Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) SCREAMING_SNAKE_CASE_ : List[str] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat((q_bias, torch.zeros_like(lowerCAmelCase__ , requires_grad=lowerCAmelCase__ ), v_bias) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = qkv_bias def _lowerCamelCase( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = 364 if '''coco''' in model_name else 224 SCREAMING_SNAKE_CASE_ : Dict = BlipaVisionConfig(image_size=lowerCAmelCase__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: SCREAMING_SNAKE_CASE_ : Dict = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=lowerCAmelCase__ ).to_dict() elif "opt-6.7b" in model_name: SCREAMING_SNAKE_CASE_ : Optional[int] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=lowerCAmelCase__ ).to_dict() elif "t5-xl" in model_name: SCREAMING_SNAKE_CASE_ : Dict = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: SCREAMING_SNAKE_CASE_ : str = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() SCREAMING_SNAKE_CASE_ : Dict = BlipaConfig(vision_config=lowerCAmelCase__ , text_config=lowerCAmelCase__ ) return config, image_size @torch.no_grad() def _lowerCamelCase( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : str=False ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if '''opt''' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) SCREAMING_SNAKE_CASE_ : int = tokenizer('\n' , add_special_tokens=lowerCAmelCase__ ).input_ids[0] SCREAMING_SNAKE_CASE_ : List[Any] = get_blipa_config(lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = BlipaForConditionalGeneration(lowerCAmelCase__ ).eval() SCREAMING_SNAKE_CASE_ : Dict = { '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''), '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''), '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''), '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''), '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''), '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''), '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''), } SCREAMING_SNAKE_CASE_ : int = model_name_to_original[model_name] # load original model print('Loading original model...' ) SCREAMING_SNAKE_CASE_ : List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu''' SCREAMING_SNAKE_CASE_ : List[Any] = load_model_and_preprocess( name=lowerCAmelCase__ , model_type=lowerCAmelCase__ , is_eval=lowerCAmelCase__ , device=lowerCAmelCase__ ) original_model.eval() print('Done!' 
) # update state dict keys SCREAMING_SNAKE_CASE_ : List[Any] = original_model.state_dict() SCREAMING_SNAKE_CASE_ : List[str] = create_rename_keys(lowerCAmelCase__ ) for src, dest in rename_keys: rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): SCREAMING_SNAKE_CASE_ : Optional[Any] = state_dict.pop(lowerCAmelCase__ ) if key.startswith('Qformer.bert' ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: SCREAMING_SNAKE_CASE_ : str = key.replace('self' , 'attention' ) if "opt_proj" in key: SCREAMING_SNAKE_CASE_ : Union[str, Any] = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: SCREAMING_SNAKE_CASE_ : Dict = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): SCREAMING_SNAKE_CASE_ : str = key.replace('opt' , 'language' ) if key.startswith('t5' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = key.replace('t5' , 'language' ) SCREAMING_SNAKE_CASE_ : List[Any] = val # read in qv biases read_in_q_v_bias(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Any = hf_model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) assert len(lowerCAmelCase__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] SCREAMING_SNAKE_CASE_ : str = load_demo_image() SCREAMING_SNAKE_CASE_ : Any = vis_processors['''eval'''](lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowerCAmelCase__ ) # create processor SCREAMING_SNAKE_CASE_ : Dict = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=lowerCAmelCase__ , image_std=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str = BlipaProcessor(image_processor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = processor(images=lowerCAmelCase__ , return_tensors='pt' ).pixel_values.to(lowerCAmelCase__ ) # make sure processor creates exact same pixel values assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) original_model.to(lowerCAmelCase__ ) hf_model.to(lowerCAmelCase__ ) with torch.no_grad(): if "opt" in model_name: SCREAMING_SNAKE_CASE_ : Dict = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits SCREAMING_SNAKE_CASE_ : Any = hf_model(lowerCAmelCase__ , lowerCAmelCase__ ).logits else: SCREAMING_SNAKE_CASE_ : List[Any] = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits SCREAMING_SNAKE_CASE_ : str = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) SCREAMING_SNAKE_CASE_ : List[Any] = hf_model(lowerCAmelCase__ , lowerCAmelCase__ , labels=lowerCAmelCase__ ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor( [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=lowerCAmelCase__ ) assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor( [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=lowerCAmelCase__ ) else: # cast to same type SCREAMING_SNAKE_CASE_ : List[str] = logits.dtype assert 
torch.allclose(original_logits.to(lowerCAmelCase__ ) , lowerCAmelCase__ , atol=1E-2 ) print('Looks ok!' ) print('Generating a caption...' ) SCREAMING_SNAKE_CASE_ : List[str] = '''''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(lowerCAmelCase__ , return_tensors='pt' ).input_ids.to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = original_model.generate({'image': original_pixel_values} ) SCREAMING_SNAKE_CASE_ : Dict = hf_model.generate( lowerCAmelCase__ , lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.shape[1] SCREAMING_SNAKE_CASE_ : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = [text.strip() for text in output_text] print('HF generation:' , lowerCAmelCase__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(lowerCAmelCase__ ) hf_model.save_pretrained(lowerCAmelCase__ ) if push_to_hub: processor.push_to_hub(F'''nielsr/{model_name}''' ) hf_model.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": A = argparse.ArgumentParser() A = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( '--model_name', default='blip2-opt-2.7b', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) A = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
717
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text in `filename` between `start_prompt` and `end_prompt`, plus its span and all lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as markdown links."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the auto-generated model list of a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
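For orientation, this is the shape of the auto-generated block the script keeps in sync inside each task guide; the model list shown is hypothetical and abbreviated.

EXAMPLE_TIP = """\
<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->
[ALBERT](../model_doc/albert), [BERT](../model_doc/bert), [BigBird](../model_doc/big_bird)
<!--End of the generated tip-->
"""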
97
0
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask a__ : int = logging.getLogger(__name__) class __magic_name__ ( _UpperCamelCase ): def __init__( self , __magic_name__=-1 ): """simple docstring""" _lowerCAmelCase = label_idx def _lowerCamelCase ( self , __magic_name__ , __magic_name__ ): """simple docstring""" if isinstance(lowercase__ , lowercase__ ): _lowerCAmelCase = mode.value _lowerCAmelCase = os.path.join(lowercase__ , F'''{mode}.txt''' ) _lowerCAmelCase = 1 _lowerCAmelCase = [] with open(lowercase__ , encoding='utf-8' ) as f: _lowerCAmelCase = [] _lowerCAmelCase = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=lowercase__ , labels=lowercase__ ) ) guid_index += 1 _lowerCAmelCase = [] _lowerCAmelCase = [] else: _lowerCAmelCase = line.split(' ' ) words.append(splits[0] ) if len(lowercase__ ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=lowercase__ , labels=lowercase__ ) ) return examples def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ): """simple docstring""" _lowerCAmelCase = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(lowercase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: _lowerCAmelCase = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(lowercase__ ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' 
, line.split()[0] ) def _lowerCamelCase ( self , __magic_name__ ): """simple docstring""" if path: with open(lowercase__ , 'r' ) as f: _lowerCAmelCase = f.read().splitlines() if "O" not in labels: _lowerCAmelCase = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __magic_name__ ( _UpperCamelCase ): def __init__( self ): """simple docstring""" super().__init__(label_idx=-2 ) def _lowerCamelCase ( self , __magic_name__ ): """simple docstring""" if path: with open(lowercase__ , 'r' ) as f: _lowerCAmelCase = f.read().splitlines() if "O" not in labels: _lowerCAmelCase = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __magic_name__ ( _UpperCamelCase ): def _lowerCamelCase ( self , __magic_name__ , __magic_name__ ): """simple docstring""" if isinstance(lowercase__ , lowercase__ ): _lowerCAmelCase = mode.value _lowerCAmelCase = os.path.join(lowercase__ , F'''{mode}.txt''' ) _lowerCAmelCase = 1 _lowerCAmelCase = [] with open(lowercase__ , encoding='utf-8' ) as f: for sentence in parse_incr(lowercase__ ): _lowerCAmelCase = [] _lowerCAmelCase = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(lowercase__ ) == len(lowercase__ ) if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=lowercase__ , labels=lowercase__ ) ) guid_index += 1 return examples def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ): """simple docstring""" _lowerCAmelCase = 0 for sentence in parse_incr(lowercase__ ): _lowerCAmelCase = preds_list[example_id] _lowerCAmelCase = "" for token in sentence: out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ''' out += "\n" writer.write(lowercase__ ) example_id += 1 def _lowerCamelCase ( self , __magic_name__ ): """simple docstring""" if path: with open(lowercase__ , 'r' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
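For reference, a hedged sketch of the whitespace-separated CoNLL-style input the NER task parses (token first, NER tag in the last column); the sentences are hypothetical.

# Miniature train.txt in the format read by NER.read_examples_from_file().
SAMPLE_CONLL = """\
-DOCSTART- -X- -X- O

EU NNP B-NP B-ORG
rejects VBZ B-VP O
German JJ B-NP B-MISC
call NN I-NP O
"""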
589
'''simple docstring''' from sklearn.metrics import fa_score import datasets lowerCAmelCase_ : int = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ lowerCAmelCase_ : Optional[int] = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} """ lowerCAmelCase_ : Any = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def snake_case__ ( self : str ) ->Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , ) def snake_case__ ( self : Optional[int] , lowercase__ : Any , lowercase__ : Tuple , lowercase__ : Optional[Any]=None , lowercase__ : List[str]=1 , lowercase__ : Optional[int]="binary" , lowercase__ : int=None ) ->int: '''simple docstring''' _UpperCamelCase : List[str] = fa_score( lowercase__ , lowercase__ , labels=lowercase__ , pos_label=lowercase__ , average=lowercase__ , sample_weight=lowercase__ ) return {"f1": float(lowercase__ ) if score.size == 1 else score}
435
0
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the first- and second-order Shannon entropy of `text` and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts.
    The first dictionary stores the frequency of single character strings.
    The second dictionary stores the frequency of two character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
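A quick worked instance of the first-order formula H = -sum(p * log2(p)): for the text "abab", p(a) = p(b) = 0.5, so H = 1 bit per character.

import math

probs = [0.5, 0.5]  # character distribution of "abab"
entropy = -sum(p * math.log2(p) for p in probs)
assert entropy == 1.0  # one bit per character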
713
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Project Euler Problem 71: return the numerator of the fraction immediately
    to the left of numerator/denominator when listing reduced proper fractions
    with denominators up to `limit` in ascending order.
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
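With denominators limited to 8, the fraction immediately left of 3/7 is 2/5, so the function returns 2.

assert solution(numerator=3, denominator=7, limit=8) == 2  # 2/5 sits just left of 3/7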
136
0
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Invert the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return 1 if the bit at `position` of `number` is set, else 0."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
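A short usage check of each helper, assuming the functions above are importable.

assert set_bit(0b1101, 1) == 0b1111
assert clear_bit(0b1111, 2) == 0b1011
assert flip_bit(0b1010, 0) == 0b1011
assert is_bit_set(0b0100, 2) is True
assert get_bit(0b0100, 1) == 0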
204
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment tree over `arr` combining values with the associative function `fnc`."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # implicit binary-heap layout
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set arr[p] = v and rebuild the path from that leaf to the root."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine the values on the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every possible segment against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
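A minimal usage sketch of the class above with a min combiner.

st = SegmentTree([5, 2, 8, 1], min)
assert st.query(0, 2) == 2  # min of [5, 2, 8]
st.update(1, 9)
assert st.query(0, 2) == 5  # min of [5, 9, 8]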
204
1
"""simple docstring""" from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _snake_case ( _snake_case : Optional[int] , _snake_case : Union[str, Any] ): lowerCAmelCase : Optional[int] = k_size // 2 lowerCAmelCase : str = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowerCAmelCase : int = 1 / (2 * pi * sigma) * exp(-(square(_snake_case ) + square(_snake_case )) / (2 * square(_snake_case )) ) return g def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : List[str] ): lowerCAmelCase : str = image.shape[0], image.shape[1] # dst image height and width lowerCAmelCase : Optional[Any] = height - k_size + 1 lowerCAmelCase : Any = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowerCAmelCase : Dict = zeros((dst_height * dst_width, k_size * k_size) ) lowerCAmelCase : Optional[int] = 0 for i, j in product(range(_snake_case ) , range(_snake_case ) ): lowerCAmelCase : int = ravel(image[i : i + k_size, j : j + k_size] ) lowerCAmelCase : Union[str, Any] = window row += 1 # turn the kernel into shape(k*k, 1) lowerCAmelCase : Union[str, Any] = gen_gaussian_kernel(_snake_case , _snake_case ) lowerCAmelCase : List[str] = ravel(_snake_case ) # reshape and get the dst image lowerCAmelCase : str = dot(_snake_case , _snake_case ).reshape(_snake_case , _snake_case ).astype(_snake_case ) return dst if __name__ == "__main__": # read original image snake_case__ = imread(R'''../image_data/lena.jpg''') # turn image in gray scale value snake_case__ = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size snake_case__ = gaussian_filter(gray, 3, sigma=1) snake_case__ = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow('''gaussian filter with 3x3 mask''', gaussianaxa) imshow('''gaussian filter with 5x5 mask''', gaussianaxa) waitKey()
717
"""simple docstring""" def _snake_case ( _snake_case : float , _snake_case : list[float] ): if discount_rate < 0: raise ValueError('''Discount rate cannot be negative''' ) if not cash_flows: raise ValueError('''Cash flows list cannot be empty''' ) lowerCAmelCase : List[str] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) ) return round(_snake_case , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
637
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase__ = { 'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['VisionEncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['TFVisionEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['FlaxVisionEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
110
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase__ = { 'vocab_file': { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt' ), } } UpperCamelCase__ = { 'junnyu/roformer_chinese_small': 15_36, 'junnyu/roformer_chinese_base': 15_36, 'junnyu/roformer_chinese_char_small': 5_12, 'junnyu/roformer_chinese_char_base': 5_12, 'junnyu/roformer_small_discriminator': 1_28, 'junnyu/roformer_small_generator': 1_28, } UpperCamelCase__ = { 'junnyu/roformer_chinese_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_base': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_base': {'do_lower_case': True}, 'junnyu/roformer_small_discriminator': {'do_lower_case': True}, 'junnyu/roformer_small_generator': {'do_lower_case': True}, } class a ( lowercase ): UpperCamelCase : int = VOCAB_FILES_NAMES UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION UpperCamelCase : Optional[int] = RoFormerTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ): super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCAmelCase__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('lowercase' , UpperCamelCase_ ) != do_lower_case or pre_tok_state.get('strip_accents' , UpperCamelCase_ ) != strip_accents ): UpperCAmelCase__ : Any = getattr(UpperCamelCase_ , pre_tok_state.pop('type' ) ) UpperCAmelCase__ : str = do_lower_case UpperCAmelCase__ : Union[str, Any] = strip_accents UpperCAmelCase__ : Dict = pre_tok_class(**UpperCamelCase_ ) UpperCAmelCase__ : Union[str, Any] = do_lower_case def __getstate__( self ): UpperCAmelCase__ : int = self.__dict__.copy() UpperCAmelCase__ : int = BertPreTokenizer() return state def 
__setstate__( self , UpperCamelCase_ ): UpperCAmelCase__ : Union[str, Any] = d UpperCAmelCase__ : List[str] = self.__dict__['_tokenizer'].get_vocab() UpperCAmelCase__ : List[Any] = PreTokenizer.custom(JiebaPreTokenizer(UpperCamelCase_ ) ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=None ): UpperCAmelCase__ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): UpperCAmelCase__ : int = [self.sep_token_id] UpperCAmelCase__ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ): UpperCAmelCase__ : Any = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , **UpperCamelCase_ , ): UpperCAmelCase__ : int = BertPreTokenizer() return super().save_pretrained(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
110
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = XLMRobertaModel.from_pretrained('xlm-roberta-base' ) __UpperCamelCase = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house __UpperCamelCase = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim __UpperCamelCase = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCamelCase = model(_lowercase )['last_hidden_state'].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = XLMRobertaModel.from_pretrained('xlm-roberta-large' ) __UpperCamelCase = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house __UpperCamelCase = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim __UpperCamelCase = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __UpperCamelCase = model(_lowercase )['last_hidden_state'].detach() self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) )
703
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCamelCase : Tuple = "." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCamelCase : Dict = [ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def A ( snake_case :List[str] , snake_case :Optional[int] , snake_case :Optional[Any] ) -> Any: __UpperCamelCase = SavedModel() __UpperCamelCase = [] with open(os.path.join(snake_case , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f: __UpperCamelCase = json.load(snake_case )['opsets'] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(snake_case )] ) with open(snake_case , 'rb' ) as f: saved_model.ParseFromString(f.read() ) __UpperCamelCase = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want __UpperCamelCase = sorted(snake_case ) __UpperCamelCase = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(snake_case ) if strict and len(snake_case ) > 0: raise Exception(f'Found the following incompatible ops for the opset {opset}:\n' + incompatible_ops ) elif len(snake_case ) > 0: print(f'Found the following incompatible ops for the opset {opset}:' ) print(*snake_case , sep='\n' ) else: print(f'The saved model {saved_model_path} can properly be converted with ONNX.' ) if __name__ == "__main__": UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=1_2, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) UpperCamelCase : str = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
293
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case : str = logging.get_logger(__name__) class _snake_case ( snake_case ): UpperCamelCase__ = ['pixel_values'] def __init__( self , _a = True , _a = None , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ): super().__init__(**_a ) __magic_name__ : List[str] = size if size is not None else {"shortest_edge": 384} __magic_name__ : Dict = get_size_dict(_a , default_to_square=_a ) __magic_name__ : Tuple = do_resize __magic_name__ : Any = size # Default value set here for backwards compatibility where the value in config is None __magic_name__ : Optional[int] = crop_pct if crop_pct is not None else 224 / 256 __magic_name__ : str = resample __magic_name__ : Any = do_rescale __magic_name__ : List[str] = rescale_factor __magic_name__ : int = do_normalize __magic_name__ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ): __magic_name__ : Optional[Any] = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}''' ) __magic_name__ : List[Any] = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __magic_name__ : List[Any] = int(shortest_edge / crop_pct ) __magic_name__ : Dict = get_resize_output_image_size(_a , size=_a , default_to_square=_a ) __magic_name__ : Optional[Any] = resize(image=_a , size=_a , resample=_a , data_format=_a , **_a ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_a , size=(shortest_edge, shortest_edge) , data_format=_a , **_a ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _a , size=(shortest_edge, shortest_edge) , resample=_a , data_format=_a , **_a ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a = None , **_a , ): return rescale(_a , scale=_a , data_format=_a , **_a ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a = None , **_a , ): return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ): __magic_name__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __magic_name__ : List[str] = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Dict = resample if resample is not None else self.resample __magic_name__ : Dict = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean __magic_name__ : Optional[int] = image_std if image_std is not None else self.image_std __magic_name__ : List[str] = size if size is not None else self.size __magic_name__ : Any = get_size_dict(_a , default_to_square=_a ) __magic_name__ : Optional[Any] = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("crop_pct must be specified if size < 384." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. __magic_name__ : List[Any] = [to_numpy_array(_a ) for image in images] if do_resize: __magic_name__ : Union[str, Any] = [self.resize(image=_a , size=_a , crop_pct=_a , resample=_a ) for image in images] if do_rescale: __magic_name__ : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: __magic_name__ : Any = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] __magic_name__ : List[Any] = [to_channel_dimension_format(_a , _a ) for image in images] __magic_name__ : Union[str, Any] = {"pixel_values": images} return BatchFeature(data=_a , tensor_type=_a )
124
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity of two strings.

    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # consume the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
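Two quick checks, including the classic textbook pair where the common prefix bonus kicks in.

assert round(jaro_winkler("martha", "marhta"), 4) == 0.9611  # classic transposition example
assert jaro_winkler("hello", "hello") == 1.0                 # identical strings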
124
1
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
707
import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)


def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Convert the original BertAbs checkpoint and verify that the two models
    produce identical outputs."""
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
604
0
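The lazy-module cell two rows above defers the real import until an attribute is first touched. As a rough stdlib-only illustration of that idea (this is not the transformers _LazyModule itself, just a sketch of the pattern using importlib):

import importlib
import types


class LazyModule(types.ModuleType):
    """Illustrative lazy loader: resolves submodule attributes on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{submodule}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value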
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self : Optional[Any] ): '''simple docstring''' __a = tempfile.mkdtemp() __a = BlipImageProcessor() __a = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) __a = BlipaProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(self.tmpdirname ) def __a ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).tokenizer def __a ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor def __a ( self : List[str] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __a ( self : Dict ): '''simple docstring''' __a = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] __a = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __a ( self : Optional[int] ): '''simple docstring''' __a = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __a = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __a = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __a ( self : str ): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __a = self.prepare_image_inputs() __a = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ) __a = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __a ( self : Union[str, Any] ): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __a = """lower newer""" __a = processor(text=SCREAMING_SNAKE_CASE__ ) __a = tokenizer(SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __a ( self : List[Any] ): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __a = """lower newer""" __a = 
self.prepare_image_inputs() __a = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __a ( self : str ): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __a = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __a ( self : Optional[int] ): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __a = """lower newer""" __a = self.prepare_image_inputs() __a = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
582
'''simple docstring''' import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" __a = tmp_path / """cache""" __a = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __a = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" __a = tmp_path / """cache""" __a = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __a = features.copy() if features else default_expected_features __a = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) __a = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" __a = tmp_path / """cache""" __a = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __a = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __a = parquet_path elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __a = [parquet_path] __a = tmp_path / """cache""" __a = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __a = 
ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) ) -> Optional[int]: """simple docstring""" assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: __a = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" __a = tmp_path / """cache""" __a = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __a = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" __a = tmp_path / """cache""" __a = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __a = features.copy() if features else default_expected_features __a = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) __a = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if split: __a = {split: parquet_path} else: __a = """train""" __a = {"""train""": parquet_path, """test""": parquet_path} __a = tmp_path / """cache""" __a = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} __a = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" __a = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 __a = pq.ParquetFile(tmp_path / """foo.parquet""" ) __a = pf.read() assert dataset.data.table == output_table def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" __a = str(shared_datadir / """test_image_rgb.jpg""" ) __a = 
{"""image""": [image_path]} __a = Features({"""image""": Image()} ) __a = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) __a = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 __a = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features __a = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
582
1
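As a quick illustration of what the Parquet reader/writer tests above exercise, a minimal round trip with the datasets library looks roughly like this (the file name is a placeholder):

from datasets import Dataset

# Build a tiny in-memory dataset matching the fixtures used in the tests above.
dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})

# Write it to Parquet, then read it back and check the schema survived.
dataset.to_parquet("example.parquet")  # hypothetical output path
reloaded = Dataset.from_parquet("example.parquet")
assert reloaded.column_names == ["col_1", "col_2", "col_3"]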
# Usage:
# ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.

For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).

All 3 models are available:

* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)


## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias


## Training data

Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).

## Eval results

Here are the BLEU scores:

model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}

The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```

## Data Sources

- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)


### BibTeX entry and citation info

```
@misc{{kasai2020deep,
    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```

"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
649
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowerCAmelCase ( __a ): '''simple docstring''' def lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , """tf_padding""" ) ) self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) ) class lowerCAmelCase : '''simple docstring''' def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any: """simple docstring""" __lowercase : List[str] = parent __lowercase : Tuple = batch_size __lowercase : Dict = num_channels __lowercase : Optional[int] = image_size __lowercase : int = depth_multiplier __lowercase : str = depth_divisible_by __lowercase : int = min_depth __lowercase : Tuple = expand_ratio __lowercase : Optional[int] = tf_padding __lowercase : Dict = output_stride __lowercase : Dict = first_layer_is_expansion __lowercase : Optional[Any] = finegrained_output __lowercase : str = hidden_act __lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) __lowercase : Optional[int] = classifier_dropout_prob __lowercase : int = use_labels __lowercase : Optional[int] = is_training __lowercase : Dict = num_labels __lowercase : Tuple = initializer_range __lowercase : Optional[Any] = scope def lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : List[Any] = None __lowercase : Optional[Any] = None if self.use_labels: __lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __lowercase : List[Any] = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , 
classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase : Optional[int] = MobileNetVaModel(config=__a ) model.to(__a ) model.eval() __lowercase : Tuple = model(__a ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple: """simple docstring""" __lowercase : List[Any] = self.num_labels __lowercase : Dict = MobileNetVaForImageClassification(__a ) model.to(__a ) model.eval() __lowercase : Dict = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]: """simple docstring""" __lowercase : int = self.num_labels __lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a ) model.to(__a ) model.eval() __lowercase : Dict = model(__a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __lowercase : str = model(__a , labels=__a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase : List[str] = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs __lowercase : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Tuple = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) _A : Optional[Any] = ( { '''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification, '''image-segmentation''': MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) _A : Tuple = False _A : List[str] = False _A : List[str] = False _A : Optional[int] = False def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = MobileNetVaModelTester(self ) __lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" ) def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" ) def lowerCAmelCase ( self : Any ) -> Tuple: """simple docstring""" pass @unittest.skip(reason="""MobileNetV2 does not output attentions""" ) def lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" __lowercase , __lowercase : Optional[int] = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : List[Any] = model_class(__a ) __lowercase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : int = [*signature.parameters.keys()] __lowercase : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ): __lowercase : Optional[Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): __lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) ) __lowercase : Tuple = outputs.hidden_states __lowercase : str = 16 self.assertEqual(len(__a ) , __a ) __lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Any = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase : Union[str, Any] = True check_hidden_states_output(__a , __a , __a ) def lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__a ) @slow def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case_ ( ): __lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self : str ) -> int: """simple docstring""" __lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a ) __lowercase : str = self.default_image_processor __lowercase : Tuple = prepare_img() __lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): __lowercase : str = model(**__a ) # verify the logits __lowercase : Union[str, Any] = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , __a ) __lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) ) @slow def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" __lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" ) __lowercase : Dict = model.to(__a ) __lowercase : Tuple = 
MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" ) __lowercase : List[str] = prepare_img() __lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): __lowercase : Union[str, Any] = model(**__a ) __lowercase : Any = outputs.logits # verify the logits __lowercase : Dict = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , __a ) __lowercase : str = torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ] , device=__a , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
649
1
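The MobileNetV2 integration tests above boil down to a short inference loop. A hedged sketch of the same flow with the public transformers API (the checkpoint name comes from the tests; the image path is a placeholder for any RGB image):

import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV2ForImageClassification

# Load the classifier checkpoint exercised by the integration test above.
processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print("Predicted class:", logits.argmax(-1).item())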
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
407
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 255 , _lowerCamelCase=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p UpperCAmelCase__ : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} UpperCAmelCase__ : List[Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : List[str] = min_resolution UpperCAmelCase__ : Optional[Any] = max_resolution UpperCAmelCase__ : List[str] = do_resize UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : int = image_mean UpperCAmelCase__ : Dict = image_std UpperCAmelCase__ : Any = do_rescale UpperCAmelCase__ : str = rescale_factor UpperCAmelCase__ : List[str] = do_pad def snake_case__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=False): if not batched: UpperCAmelCase__ : List[Any] = image_inputs[0] if isinstance(_lowerCamelCase , Image.Image): UpperCAmelCase__ , UpperCAmelCase__ : str = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : List[str] = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : List[Any] = int(self.size["""shortest_edge"""] * h / w) UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""] elif w > h: UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""] UpperCAmelCase__ : Optional[int] = int(self.size["""shortest_edge"""] * w / h) else: UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""] UpperCAmelCase__ : int = self.size["""shortest_edge"""] else: UpperCAmelCase__ : str = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : Any = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) UpperCAmelCase__ : Any = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[0])[0] UpperCAmelCase__ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[1])[1] return expected_height, expected_width @require_torch @require_vision class _snake_case ( a__ , unittest.TestCase ): lowerCAmelCase :Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None def snake_case__ ( self): UpperCAmelCase__ : Optional[Any] = DeformableDetrImageProcessingTester(self) @property def snake_case__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self): UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_lowerCamelCase , 
"""image_mean""")) self.assertTrue(hasattr(_lowerCamelCase , """image_std""")) self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""")) self.assertTrue(hasattr(_lowerCamelCase , """do_resize""")) self.assertTrue(hasattr(_lowerCamelCase , """do_rescale""")) self.assertTrue(hasattr(_lowerCamelCase , """do_pad""")) self.assertTrue(hasattr(_lowerCamelCase , """size""")) def snake_case__ ( self): UpperCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333}) self.assertEqual(image_processor.do_pad , _lowerCamelCase) UpperCAmelCase__ : Optional[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCamelCase) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84}) self.assertEqual(image_processor.do_pad , _lowerCamelCase) def snake_case__ ( self): pass def snake_case__ ( self): # Initialize image_processing UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image) # Test not batched input UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(_lowerCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase) UpperCAmelCase__ : Optional[int] = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self): # Initialize image_processing UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors UpperCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray) # Test not batched input UpperCAmelCase__ : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self): # Initialize image_processing UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors UpperCAmelCase__ : Tuple = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor) # Test not batched input UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_lowerCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : int = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case__ ( self): # prepare image and target UpperCAmelCase__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""") as f: UpperCAmelCase__ : Dict = json.loads(f.read()) UpperCAmelCase__ : int = {"""image_id""": 3_9769, """annotations""": target} # encode them UpperCAmelCase__ : Dict = DeformableDetrImageProcessor() UpperCAmelCase__ : int = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors="""pt""") # verify pixel values UpperCAmelCase__ : Tuple = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase) UpperCAmelCase__ : Any = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4)) # verify area UpperCAmelCase__ : List[Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase)) # verify boxes UpperCAmelCase__ : Union[str, Any] = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase) UpperCAmelCase__ : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3)) # verify image_id UpperCAmelCase__ : Optional[int] = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase)) # verify is_crowd UpperCAmelCase__ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase)) # verify class_labels UpperCAmelCase__ : Any = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase)) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase)) # verify size UpperCAmelCase__ : List[Any] = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase)) @slow def snake_case__ ( self): # prepare image, target and masks_path UpperCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f: 
UpperCAmelCase__ : Optional[int] = json.loads(f.read()) UpperCAmelCase__ : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} UpperCAmelCase__ : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""") # encode them UpperCAmelCase__ : List[str] = DeformableDetrImageProcessor(format="""coco_panoptic""") UpperCAmelCase__ : Tuple = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors="""pt""") # verify pixel values UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase) UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4)) # verify area UpperCAmelCase__ : str = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase)) # verify boxes UpperCAmelCase__ : List[str] = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase) UpperCAmelCase__ : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3)) # verify image_id UpperCAmelCase__ : Tuple = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase)) # verify is_crowd UpperCAmelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase)) # verify class_labels UpperCAmelCase__ : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase)) # verify masks UpperCAmelCase__ : Dict = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _lowerCamelCase) # verify orig_size UpperCAmelCase__ : Any = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase)) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase))
407
1
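The is_pentagonal test above inverts P(n) = n(3n - 1)/2: since 1 + 24 P(n) = (6n - 1)^2, the square root is exact and (1 + root)/6 recovers n. A quick self-check that the inverse formula round-trips for the first pentagonal numbers (plain Python, no dependencies):

def pentagonal(n: int) -> int:
    # Closed form for the n-th pentagonal number, P(n) = n(3n - 1) / 2.
    return n * (3 * n - 1) // 2


# Every true pentagonal number must satisfy the inverse test used above.
for n in range(1, 100):
    p = pentagonal(n)
    assert ((1 + (1 + 24 * p) ** 0.5) / 6) % 1 == 0
print("inverse formula round-trips for n = 1..99")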
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : str = logging.get_logger(__name__) def UpperCamelCase__ ( A__ , A__=False ) -> Union[str, Any]: snake_case__ : str = [] # fmt: off # stem: rename_keys.append(('cls_token', 'vit.embeddings.cls_token') ) rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') ) rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') ) # backbone rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') ) rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') ) rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") ) 
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case__ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) # fmt: on return rename_keys def UpperCamelCase__ ( A__ , A__ , A__=False ) -> Dict: for i in range(config.num_hidden_layers ): if base_model: snake_case__ : str = '' else: snake_case__ : str = 'vit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) snake_case__ : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Any = in_proj_weight[ : config.hidden_size, : ] snake_case__ : Union[str, Any] = in_proj_bias[: config.hidden_size] snake_case__ : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ : List[Any] = in_proj_weight[ -config.hidden_size :, : ] snake_case__ : Optional[Any] = in_proj_bias[-config.hidden_size :] def UpperCamelCase__ ( A__ ) -> Optional[Any]: snake_case__ : Dict = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(A__ , A__ ) def UpperCamelCase__ ( A__ , A__ , A__ ) -> Optional[Any]: snake_case__ : str = dct.pop(A__ ) snake_case__ : Dict = val def UpperCamelCase__ ( ) -> Tuple: snake_case__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : Union[str, Any] = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase__ ( A__ , A__ , A__=False ) -> Optional[Any]: snake_case__ : Optional[int] = BitConfig( global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=A__ , ) snake_case__ : Any = ViTHybridConfig(backbone_config=A__ , image_size=384 , num_labels=1000 ) snake_case__ : List[Any] = False # load original model from timm snake_case__ : Any = timm.create_model(A__ , pretrained=A__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ : Dict = timm_model.state_dict() if base_model: remove_classification_head_(A__ ) snake_case__ : Tuple = create_rename_keys(A__ , A__ ) for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) read_in_q_k_v(A__ , A__ , A__ ) snake_case__ : Any = 'huggingface/label-files' snake_case__ : Union[str, Any] = 'imagenet-1k-id2label.json' snake_case__ : Dict = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) snake_case__ : Union[str, Any] = {int(A__ ): v for k, v in idalabel.items()} snake_case__ : Union[str, Any] = idalabel snake_case__ : Tuple = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": snake_case__ : Tuple = ViTHybridModel(A__ ).eval() else: snake_case__ : List[Any] = ViTHybridForImageClassification(A__ ).eval() model.load_state_dict(A__ ) # create image processor snake_case__ : int = create_transform(**resolve_data_config({} , model=A__ ) ) snake_case__ : List[Any] = transform.transforms snake_case__ : List[Any] = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } snake_case__ : Union[str, Any] = ViTHybridImageProcessor( do_resize=A__ , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=A__ , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=A__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) snake_case__ : Union[str, Any] = prepare_img() snake_case__ : List[Any] = transform(A__ ).unsqueeze(0 ) snake_case__ : List[str] = processor(A__ , return_tensors='pt' ).pixel_values # verify pixel values assert 
torch.allclose(A__ , A__ ) # verify logits with torch.no_grad(): snake_case__ : str = model(A__ ) snake_case__ : List[str] = outputs.logits print('Predicted class:' , logits.argmax(-1 ).item() ) if base_model: snake_case__ : Optional[int] = timm_model.forward_features(A__ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(A__ , outputs.pooler_output , atol=1e-3 ) else: snake_case__ : Optional[int] = timm_model(A__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(A__ , outputs.logits , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(A__ ).mkdir(exist_ok=A__ ) print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(A__ ) if push_to_hub: print(F"""Pushing model and processor to the hub {vit_name}""" ) model.push_to_hub(F"""ybelkada/{vit_name}""" ) processor.push_to_hub(F"""ybelkada/{vit_name}""" ) if __name__ == "__main__": lowerCAmelCase__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCAmelCase__ : Optional[Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
711
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
699
0
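The ViT-hybrid conversion script earlier in this row splits timm's fused qkv projection into separate query/key/value weights. A self-contained torch sketch of that slicing, with an arbitrary toy hidden size (real ViT hybrids use 768 or more):

import torch

hidden_size = 8  # toy size for illustration only
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] rows
in_proj_bias = torch.randn(3 * hidden_size)

# Slice the fused projection into query, key and value, in that order.
q_w, k_w, v_w = in_proj_weight.split(hidden_size, dim=0)
q_b, k_b, v_b = in_proj_bias.split(hidden_size, dim=0)

# Same slices the conversion code takes explicitly.
assert torch.equal(q_w, in_proj_weight[:hidden_size, :])
assert torch.equal(v_b, in_proj_bias[-hidden_size:])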
"""simple docstring""" import functools def snake_case ( _a: str , _a: str )-> Optional[int]: '''simple docstring''' lowerCamelCase__ = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase__ = len(_SCREAMING_SNAKE_CASE ) @functools.cache def min_distance(_a: int , _a: int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowerCamelCase__ = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , _SCREAMING_SNAKE_CASE ) , 1 + min_distance(_SCREAMING_SNAKE_CASE , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
510
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __lowercase : str = logging.getLogger(__name__) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a=-1 ): '''simple docstring''' __a : Tuple = label_idx def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if isinstance(__a , __a ): __a : Any = mode.value __a : List[Any] = os.path.join(__a , f"""{mode}.txt""" ) __a : Optional[Any] = 1 __a : str = [] with open(__a , encoding='utf-8' ) as f: __a : Tuple = [] __a : Dict = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) ) guid_index += 1 __a : str = [] __a : int = [] else: __a : Optional[int] = line.split(' ' ) words.append(splits[0] ) if len(__a ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) ) return examples def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : List[str] = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(__a ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: __a : Tuple = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(__a ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' if path: with open(__a , 'r' ) as f: __a : Any = f.read().splitlines() if "O" not in labels: __a : Optional[int] = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self ): '''simple docstring''' super().__init__(label_idx=-2 ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' if path: with open(__a , 'r' ) as f: __a : Any = f.read().splitlines() if "O" not in labels: __a : List[Any] = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if isinstance(__a , __a ): __a : Dict = mode.value __a : List[str] = os.path.join(__a , f"""{mode}.txt""" ) __a : Tuple = 1 __a : List[str] = [] with open(__a , encoding='utf-8' ) as f: for sentence in parse_incr(__a ): __a : Any = [] __a : Optional[int] = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(__a ) == len(__a ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) ) guid_index += 1 return examples def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Tuple = 0 for sentence in parse_incr(__a ): __a : int = preds_list[example_id] __a : str = '' for token in sentence: out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """ out += "\n" writer.write(__a ) example_id += 1 def __UpperCAmelCase ( self , __a ): '''simple docstring''' if path: with open(__a , 'r' ) as f: return 
f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
476
0
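A small usage sketch for the memoized edit-distance helper defined two rows above; the inputs and expected values are toy examples.

assert snake_case("kitten", "sitting") == 3  # substitute k->s, e->i, insert g
assert snake_case("", "abc") == 3            # pure insertions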
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class __UpperCAmelCase( __a , unittest.TestCase ): """simple docstring""" __magic_name__ = CpmAntTokenizer __magic_name__ = False def UpperCAmelCase ( self ): """simple docstring""" super().setUp() A_ : List[Any] = [ """<d>""", """</d>""", """<s>""", """</s>""", """</_>""", """<unk>""", """<pad>""", """</n>""", """我""", """是""", """C""", """P""", """M""", """A""", """n""", """t""", ] A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) @tooslow def UpperCAmelCase ( self ): """simple docstring""" A_ : Any = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' ) A_ : Optional[Any] = """今天天气真好!""" A_ : List[Any] = ["""今天""", """天气""", """真""", """好""", """!"""] A_ : Tuple = tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) A_ : List[Any] = """今天天气真好!""" A_ : Union[str, Any] = [tokenizer.bos_token] + tokens A_ : Dict = [6, 9802, 1_4962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ ) A_ : Dict = tokenizer.decode(a_ ) self.assertEqual(a_ , a_ )
706
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _lowerCAmelCase = { 'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ 'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoForCausalLM', 'GPTNeoForQuestionAnswering', 'GPTNeoForSequenceClassification', 'GPTNeoForTokenClassification', 'GPTNeoModel', 'GPTNeoPreTrainedModel', 'load_tf_weights_in_gpt_neo', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ 'FlaxGPTNeoForCausalLM', 'FlaxGPTNeoModel', 'FlaxGPTNeoPreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
236
0
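The GPT-Neo __init__ above defers its heavy imports through transformers' _LazyModule; here is a hedged, simplified sketch of that lazy-import pattern, where attributes resolve on first access rather than at import time. LazyModule and its mapping are illustrative, not the real transformers class.

import importlib

class LazyModule:
    def __init__(self, import_structure):
        # reverse {module_name: [attribute, ...]} into attribute -> module lookups
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        if name not in self._attr_to_module:
            raise AttributeError(name)
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)

lazy = LazyModule({"json": ["dumps"]})
print(lazy.dumps({"a": 1}))  # "json" is imported only on this first access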
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowercase : def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=None , ): """simple docstring""" lowerCAmelCase__ : str = parent lowerCAmelCase__ : Dict = batch_size lowerCAmelCase__ : Optional[int] = decoder_seq_length # For common tests lowerCAmelCase__ : str = self.decoder_seq_length lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : Dict = use_attention_mask lowerCAmelCase__ : Any = use_labels lowerCAmelCase__ : Tuple = vocab_size lowerCAmelCase__ : int = d_model lowerCAmelCase__ : Dict = d_model lowerCAmelCase__ : Dict = decoder_layers lowerCAmelCase__ : Dict = decoder_layers lowerCAmelCase__ : Optional[int] = decoder_ffn_dim lowerCAmelCase__ : List[Any] = decoder_attention_heads lowerCAmelCase__ : Any = decoder_attention_heads lowerCAmelCase__ : Optional[int] = eos_token_id lowerCAmelCase__ : Tuple = bos_token_id lowerCAmelCase__ : int = pad_token_id lowerCAmelCase__ : Tuple = decoder_start_token_id lowerCAmelCase__ : List[Any] = use_cache lowerCAmelCase__ : str = max_position_embeddings lowerCAmelCase__ : Dict = None lowerCAmelCase__ : List[Any] = decoder_seq_length lowerCAmelCase__ : Optional[int] = 2 lowerCAmelCase__ : Tuple = 1 def lowercase_ ( self ): """simple docstring""" lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowerCAmelCase__ : Optional[int] = None if self.use_attention_mask: lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) lowerCAmelCase__ : Any = None if self.use_labels: lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowerCAmelCase__ : Union[str, Any] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ): """simple docstring""" lowerCAmelCase__ : Any = True lowerCAmelCase__ : Optional[Any] = TrOCRDecoder(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).eval() lowerCAmelCase__ : Any = input_ids[:2] input_ids[input_ids == 0] += 1 # 
first forward pass lowerCAmelCase__ : List[str] = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ : str = model(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) lowerCAmelCase__ : Optional[int] = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids lowerCAmelCase__ : str = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and lowerCAmelCase__ : str = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase__ : str = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] lowerCAmelCase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice lowerCAmelCase__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase__ : Tuple = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() lowerCAmelCase__ : Optional[int] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) def lowercase_ ( self ): """simple docstring""" lowerCAmelCase__ : int = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs lowerCAmelCase__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class lowercase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): __a = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __a = (TrOCRForCausalLM,) if is_torch_available() else () __a = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} __a = True __a = False def lowercase_ ( self ): """simple docstring""" lowerCAmelCase__ : List[str] = TrOCRStandaloneDecoderModelTester(self , is_training=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ ) def lowercase_ ( self ): """simple docstring""" pass def lowercase_ ( self ): """simple docstring""" pass def lowercase_ ( self ): """simple docstring""" pass def lowercase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase_ ( self ): """simple docstring""" lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*SCREAMING_SNAKE_CASE__ ) def lowercase_ ( self ): """simple docstring""" return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def lowercase_ ( self ): """simple docstring""" pass
233
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration A__ : List[str] = pytest.mark.integration A__ : List[Any] = {"""comet"""} A__ : str = importlib.util.find_spec("""fairseq""") is not None A__ : str = {"""code_eval"""} A__ : List[Any] = os.name == """nt""" A__ : Optional[Any] = {"""bertscore""", """frugalscore""", """perplexity"""} A__ : List[str] = importlib.util.find_spec("""transformers""") is not None def _a ( __UpperCamelCase : Dict ): @wraps(__UpperCamelCase ) def wrapper(self : Dict ,__UpperCamelCase : List[str] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''"test requires Fairseq"''' ) else: test_case(self ,__UpperCamelCase ) return wrapper def _a ( __UpperCamelCase : Optional[int] ): @wraps(__UpperCamelCase ) def wrapper(self : Any ,__UpperCamelCase : Optional[int] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''"test requires transformers"''' ) else: test_case(self ,__UpperCamelCase ) return wrapper def _a ( __UpperCamelCase : Optional[int] ): @wraps(__UpperCamelCase ) def wrapper(self : Optional[int] ,__UpperCamelCase : List[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''"test not supported on Windows"''' ) else: test_case(self ,__UpperCamelCase ) return wrapper def _a ( ): lowerCAmelCase__ : str = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @local class lowercase ( parameterized.TestCase ): __a = {} __a = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' ) def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCAmelCase__ : int = '''[...]''' lowerCAmelCase__ : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE__ ) ).module_path ) lowerCAmelCase__ : Tuple = datasets.load.import_main_class(metric_module.__name__ , dataset=SCREAMING_SNAKE_CASE__ ) # check parameters lowerCAmelCase__ : Tuple = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(SCREAMING_SNAKE_CASE__ , metric_module.__name__ ): with self.use_local_metrics(): try: lowerCAmelCase__ : Dict = doctest.testmod(SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , raise_on_error=SCREAMING_SNAKE_CASE__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = '''[...]''' lowerCAmelCase__ : List[str] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE__ ) ).module_path ) 
# run doctest with self.use_local_metrics(): lowerCAmelCase__ : str = doctest.testmod(SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , raise_on_error=SCREAMING_SNAKE_CASE__ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](SCREAMING_SNAKE_CASE__ ): yield else: yield @contextmanager def lowercase_ ( self ): """simple docstring""" def load_local_metric(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): return load_metric(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE__ ) , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with patch('''datasets.load_metric''' ) as mock_load_metric: lowerCAmelCase__ : Optional[Any] = load_local_metric yield @classmethod def lowercase_ ( cls , SCREAMING_SNAKE_CASE__ ): """simple docstring""" def wrapper(SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ : Any = contextmanager(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ : Tuple = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def _a ( __UpperCamelCase : int ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' ,'''''' ,'''''' ) # handle pytest cli flags class lowercase ( __UpperCamelCase ): def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ): """simple docstring""" assert len(input_dict['''input_ids'''] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: lowerCAmelCase__ : List[str] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def _a ( __UpperCamelCase : str ): import torch def bert_cos_score_idf(__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,*__UpperCamelCase : str ,**__UpperCamelCase : List[Any] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__UpperCamelCase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: lowerCAmelCase__ : List[str] = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def _a ( __UpperCamelCase : Tuple ): def load_from_checkpoint(__UpperCamelCase : Any ): class lowercase : def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): """simple docstring""" assert len(SCREAMING_SNAKE_CASE__ ) == 2 lowerCAmelCase__ : Dict = [0.19, 0.92] return scores, sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: lowerCAmelCase__ : str = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: lowerCAmelCase__ : Optional[Any] = load_from_checkpoint yield def _a ( ): lowerCAmelCase__ : int = load_metric(os.path.join('''metrics''' ,'''seqeval''' ) ) lowerCAmelCase__ : Dict = '''ERROR''' lowerCAmelCase__ : Optional[int] = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, 
IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ): metric.compute(predictions=[] ,references=[] ,scheme=__UpperCamelCase )
233
1
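A minimal sketch of the cache-consistency invariant the TrOCR test above asserts: the newest token's outputs must match whether the prefix is recomputed from scratch or served from past_key_values. Random tensors stand in for model outputs here, so the assertion holds by construction.

import torch

output_from_no_past = torch.randn(2, 6, 32)               # full forward over 6 tokens
output_from_past = output_from_no_past[:, -1:, :].clone()  # cached forward returns only the new step
random_slice_idx = torch.randint(0, 32, (1,)).item()
no_past_slice = output_from_no_past[:, -1, random_slice_idx]
past_slice = output_from_past[:, 0, random_slice_idx]
assert torch.allclose(no_past_slice, past_slice, atol=1e-3)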
"""simple docstring""" import os import sys import transformers A = """3""" print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
706
"""simple docstring""" def __A ( a_ :float , a_ :float) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F'{price_plus_tax(100, 0.25) = }') print(F'{price_plus_tax(125.50, 0.05) = }')
101
0
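The environment-report script above probes each optional dependency by attempting an import and falling back to None. A hedged generic sketch of that pattern follows; the probe helper and the numpy example are illustrative choices, not part of the original script.

def probe(package_name: str):
    try:
        module = __import__(package_name)
    except ImportError:
        return None
    return getattr(module, "__version__", "unknown")

print("numpy version:", probe("numpy"))  # a version string, or None if numpy is absent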
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ (a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Dict = GPTaTokenizer __UpperCamelCase : Any = GPTaTokenizerFast __UpperCamelCase : str = True __UpperCamelCase : Tuple = {'''add_prefix_space''': True} __UpperCamelCase : Optional[int] = False def __magic_name__ (self ) -> Dict: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__ : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] SCREAMING_SNAKE_CASE__ : Any = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) SCREAMING_SNAKE_CASE__ : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE__ ) ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = """lower newer""" SCREAMING_SNAKE_CASE__ : str = """lower newer""" return input_text, output_text def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ : Tuple = """lower newer""" SCREAMING_SNAKE_CASE__ : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : str = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) 
SCREAMING_SNAKE_CASE__ : Optional[Any] = """lower newer""" # Testing tokenization SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE__ : List[Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing the unknown token SCREAMING_SNAKE_CASE__ : Optional[Any] = tokens + [rust_tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" pass def __magic_name__ (self , SCREAMING_SNAKE_CASE__=15 ) -> Dict: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # Simple input SCREAMING_SNAKE_CASE__ : Dict = """This is a simple input""" SCREAMING_SNAKE_CASE__ : List[Any] = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE__ : Tuple = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , ) def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = 
GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" ) # Simple input SCREAMING_SNAKE_CASE__ : str = """This is a simple input""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""This is a simple input looooooooong""", """This is a simple input"""] SCREAMING_SNAKE_CASE__ : int = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE__ : Optional[Any] = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ : str = tokenizer(SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : str = tokenizer(*SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def __magic_name__ (self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = """$$$""" SCREAMING_SNAKE_CASE__ : str = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = """This is a simple input""" SCREAMING_SNAKE_CASE__ : List[Any] = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.bos_token_id SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.decode(out_s.input_ids ) SCREAMING_SNAKE_CASE__ : str = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" pass def __magic_name__ (self ) -> int: 
"""simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = """Encode this.""" SCREAMING_SNAKE_CASE__ : Optional[Any] = """This one too please.""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode_plus( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : int = encoded_sequence_dict["""input_ids"""] SCREAMING_SNAKE_CASE__ : Dict = encoded_sequence_dict["""special_tokens_mask"""] self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : Tuple = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE__ ) ] SCREAMING_SNAKE_CASE__ : List[str] = [x for x in filtered_sequence if x is not None] self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @require_tokenizers class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = """A photo of a cat""" SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [2, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained("""test_opt""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("""./test_opt""" ) SCREAMING_SNAKE_CASE__ : int = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [2, 2_50, 13_45, 9, 10, 47_58] ) def __magic_name__ (self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = """A photo of a cat""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) # Same as above self.assertEqual(SCREAMING_SNAKE_CASE__ , [2, 2_50, 13_45, 9, 10, 47_58] ) @unittest.skip("""This test is failing because of a bug in the fast tokenizer""" ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = """bos""" SCREAMING_SNAKE_CASE__ : str = tokenizer.get_vocab()["""bos"""] SCREAMING_SNAKE_CASE__ : Tuple = """A photo of a cat""" SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) # We changed the bos token self.assertEqual(SCREAMING_SNAKE_CASE__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained("""./tok""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("""./tok""" ) self.assertTrue(tokenizer.is_fast ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
223
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed UpperCAmelCase__ : List[str] = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowercase_ ( _snake_case ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowercase_ ( _snake_case ,_snake_case ): if args.student_type == "roberta": SCREAMING_SNAKE_CASE__ : Optional[Any] = False elif args.student_type == "gpt2": SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def lowercase_ ( _snake_case ,_snake_case ): if args.student_type == "roberta": SCREAMING_SNAKE_CASE__ : List[str] = False def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" ,action="""store_true""" ,help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" ,type=_snake_case ,required=_snake_case ,help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" ,type=_snake_case ,required=_snake_case ,help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" ,) parser.add_argument( """--student_type""" ,type=_snake_case ,choices=["""distilbert""", """roberta""", """gpt2"""] ,required=_snake_case ,help="""The student type (DistilBERT, RoBERTa).""" ,) parser.add_argument("""--student_config""" ,type=_snake_case ,required=_snake_case ,help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" ,default=_snake_case ,type=_snake_case ,help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" ,choices=["""bert""", """roberta""", """gpt2"""] ,required=_snake_case ,help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" ,type=_snake_case ,required=_snake_case ,help="""The teacher model.""" ) parser.add_argument("""--temperature""" ,default=2.0 ,type=_snake_case ,help="""Temperature for the softmax 
temperature.""" ) parser.add_argument( """--alpha_ce""" ,default=0.5 ,type=_snake_case ,help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" ,default=0.0 ,type=_snake_case ,help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" ,) parser.add_argument("""--alpha_clm""" ,default=0.5 ,type=_snake_case ,help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" ,default=0.0 ,type=_snake_case ,help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" ,default=0.0 ,type=_snake_case ,help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" ,action="""store_true""" ,help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" ,default=0.15 ,type=_snake_case ,help="""Proportion of tokens for which we need to make a prediction.""" ,) parser.add_argument("""--word_mask""" ,default=0.8 ,type=_snake_case ,help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" ,default=0.1 ,type=_snake_case ,help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" ,default=0.1 ,type=_snake_case ,help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" ,default=0.7 ,type=_snake_case ,help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" ,) parser.add_argument("""--token_counts""" ,type=_snake_case ,help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" ,action="""store_true""" ,help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" ,) parser.add_argument( """--freeze_pos_embs""" ,action="""store_true""" ,help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" ,) parser.add_argument( """--freeze_token_type_embds""" ,action="""store_true""" ,help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" ,) parser.add_argument("""--n_epoch""" ,type=_snake_case ,default=3 ,help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" ,type=_snake_case ,default=5 ,help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" ,action="""store_false""" ,help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" ,) parser.add_argument( """--gradient_accumulation_steps""" ,type=_snake_case ,default=50 ,help="""Gradient accumulation for larger training batches.""" ,) parser.add_argument("""--warmup_prop""" ,default=0.05 ,type=_snake_case ,help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" ,default=0.0 ,type=_snake_case ,help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" ,default=5E-4 ,type=_snake_case ,help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" ,default=1E-6 ,type=_snake_case ,help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" ,default=5.0 ,type=_snake_case ,help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" ,default=0.02 ,type=_snake_case ,help="""Random initialization range.""" ) parser.add_argument( """--fp16""" ,action="""store_true""" ,help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" ,) parser.add_argument( """--fp16_opt_level""" ,type=_snake_case ,default="""O1""" ,help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) ,) parser.add_argument("""--n_gpu""" ,type=_snake_case ,default=1 ,help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" ,type=_snake_case ,default=-1 ,help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" ,type=_snake_case ,default=56 ,help="""Random seed""" ) parser.add_argument("""--log_interval""" ,type=_snake_case ,default=500 ,help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" ,type=_snake_case ,default=4_000 ,help="""Checkpoint interval.""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path ,"""parameters.json""" ) ,"""w""" ) as f: json.dump(vars(_snake_case ) ,_snake_case ,indent=4 ) git_log(args.dump_path ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = MODEL_CLASSES[args.student_type] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = MODEL_CLASSES[args.teacher_type] # TOKENIZER # SCREAMING_SNAKE_CASE__ : str = teacher_tokenizer_class.from_pretrained(args.teacher_name ) SCREAMING_SNAKE_CASE__ : int = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): SCREAMING_SNAKE_CASE__ : str = tokenizer.all_special_tokens.index(_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) SCREAMING_SNAKE_CASE__ : List[Any] = special_tok_ids SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file ,"""rb""" ) as fp: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
pickle.load(_snake_case ) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts ,"""rb""" ) as fp: SCREAMING_SNAKE_CASE__ : List[Any] = pickle.load(_snake_case ) SCREAMING_SNAKE_CASE__ : Tuple = np.maximum(_snake_case ,1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): SCREAMING_SNAKE_CASE__ : Optional[int] = 0.0 # do not predict special tokens SCREAMING_SNAKE_CASE__ : int = torch.from_numpy(_snake_case ) else: SCREAMING_SNAKE_CASE__ : Tuple = None SCREAMING_SNAKE_CASE__ : Any = LmSeqsDataset(params=_snake_case ,data=_snake_case ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) SCREAMING_SNAKE_CASE__ : Tuple = student_config_class.from_pretrained(args.student_config ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = student_model_class.from_pretrained(args.student_pretrained_weights ,config=_snake_case ) else: SCREAMING_SNAKE_CASE__ : Tuple = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info("""Student loaded.""" ) # TEACHER # SCREAMING_SNAKE_CASE__ : str = teacher_model_class.from_pretrained(args.teacher_name ,output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case ,_snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case ,_snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() SCREAMING_SNAKE_CASE__ : int = Distiller( params=_snake_case ,dataset=_snake_case ,token_probs=_snake_case ,student=_snake_case ,teacher=_snake_case ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
223
1
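A toy sketch of the padding invariants the GPT-2 tokenizer test above asserts for padding="max_length": pad ids fill the sequence up to max_length and the attention mask is zero over the padded positions. All ids below are made up.

pad_token_id = 0
input_ids = [14, 15, 10]  # toy token ids
max_length = 6
padded = input_ids + [pad_token_id] * (max_length - len(input_ids))
attention_mask = [1] * len(input_ids) + [0] * (max_length - len(input_ids))
assert len(padded) == max_length
assert pad_token_id in padded and 0 in attention_mask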
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def __a ( _lowercase ): """simple docstring""" def wrapper(*_lowercase , **_lowercase ): lowerCamelCase__ : int = timeit.default_timer() lowerCamelCase__ : Optional[Any] = func(*_lowercase , **_lowercase ) lowerCamelCase__ : int = timeit.default_timer() - starttime return delta lowerCamelCase__ : Optional[Any] = func.__name__ return wrapper def __a ( _lowercase , _lowercase=100 , _lowercase=None ): """simple docstring""" lowerCamelCase__ : Dict = [] lowerCamelCase__ : Union[str, Any] = seq_shapes or {} for i in range(_lowercase ): lowerCamelCase__ : Optional[Any] = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(_lowercase , _ArrayXD ): lowerCamelCase__ : Any = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(_lowercase , datasets.Value ): if v.dtype == "string": lowerCamelCase__ : int = '''The small grey turtle was surprisingly fast when challenged.''' else: lowerCamelCase__ : List[str] = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(_lowercase , datasets.Sequence ): while isinstance(_lowercase , datasets.Sequence ): lowerCamelCase__ : Optional[Any] = v.feature lowerCamelCase__ : List[str] = seq_shapes[k] lowerCamelCase__ : str = np.random.rand(*_lowercase ).astype(v.dtype ) lowerCamelCase__ : Dict = data dummy_data.append((i, example) ) return dummy_data def __a ( _lowercase , _lowercase , _lowercase=100 , _lowercase=None ): """simple docstring""" lowerCamelCase__ : List[str] = generate_examples(_lowercase , num_examples=_lowercase , seq_shapes=_lowercase ) with ArrowWriter(features=_lowercase , path=_lowercase ) as writer: for key, record in dummy_data: lowerCamelCase__ : str = features.encode_example(_lowercase ) writer.write(_lowercase ) lowerCamelCase__ , lowerCamelCase__ : Dict = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" ) lowerCamelCase__ : Optional[int] = datasets.Dataset.from_file(filename=_lowercase , info=datasets.DatasetInfo(features=_lowercase ) ) return dataset
121
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : str = { "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"], "tokenization_deberta": ["DebertaTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = ["DebertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = [ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaForMaskedLM", "DebertaForQuestionAnswering", "DebertaForSequenceClassification", "DebertaForTokenClassification", "DebertaModel", "DebertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDebertaForMaskedLM", "TFDebertaForQuestionAnswering", "TFDebertaForSequenceClassification", "TFDebertaForTokenClassification", "TFDebertaModel", "TFDebertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
121
1
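A usage sketch for the timing decorator in the benchmark row above; it mirrors that decorator, and the workload function and its body are illustrative assumptions.

import timeit

def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - starttime
    wrapper.__name__ = func.__name__
    return wrapper

@get_duration
def workload():
    sum(range(1_000_000))  # stand-in for the real benchmarked work

print(f"{workload.__name__}: {workload():.4f}s")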
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CanineTokenizer _lowercase = False def _UpperCamelCase( self : Dict ): super().setUp() a__ : str = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _UpperCamelCase( self : Union[str, Any] ): return CanineTokenizer.from_pretrained("google/canine-s" ) def _UpperCamelCase( self : Optional[Any] , **lowerCamelCase__ : Tuple ): a__ : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) a__ : Optional[int] = 1_024 return tokenizer @require_torch def _UpperCamelCase( self : Tuple ): a__ : Dict = self.canine_tokenizer a__ : Union[str, Any] = ["Life is like a box of chocolates.", "You never know what you're gonna get."] # fmt: off a__ : Dict = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0] # fmt: on a__ : str = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) a__ : str = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def _UpperCamelCase( self : Optional[int] ): a__ : Optional[Any] = self.canine_tokenizer a__ : List[Any] = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."] a__ : Any = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("input_ids" , lowerCamelCase__ ) self.assertIn("attention_mask" , lowerCamelCase__ ) self.assertIn("token_type_ids" , lowerCamelCase__ ) @require_torch def _UpperCamelCase( self : List[str] ): a__ : int = self.canine_tokenizer a__ : List[Any] = [ "What's the weater?", "It's about 25 degrees.", ] a__ : Dict = tokenizer( text_target=lowerCamelCase__ , max_length=32 , padding="max_length" , truncation=lowerCamelCase__ , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def _UpperCamelCase( self : int ): # safety check on max_len default value so we are sure the test works a__ : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test a__ : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc a__ : Optional[Any] = tempfile.mkdtemp() a__ : int = " He is very happy, UNwant\u00E9d,running" a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) tokenizer.save_pretrained(lowerCamelCase__ ) a__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase__ ) a__ : Optional[int] = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) 
shutil.rmtree(lowerCamelCase__ ) a__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc a__ : Tuple = tempfile.mkdtemp() a__ : int = " He is very happy, UNwant\u00E9d,running" a__ : Any = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: a__ : List[Any] = chr(0XE007 ) additional_special_tokens.append(lowerCamelCase__ ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) a__ : str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) tokenizer.save_pretrained(lowerCamelCase__ ) a__ : Tuple = tokenizer.__class__.from_pretrained(lowerCamelCase__ ) a__ : List[Any] = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertIn(lowerCamelCase__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) a__ : List[str] = tokenizer.__class__.from_pretrained(lowerCamelCase__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : Optional[int] = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__, a__ : Tuple = self.get_clean_sequence(lowerCamelCase__ ) # a special token for Canine can be defined as follows: a__ : List[Any] = 0XE005 a__ : Optional[Any] = chr(lowerCamelCase__ ) tokenizer.add_special_tokens({"cls_token": special_token} ) a__ : str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) , 1 ) a__ : Optional[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCamelCase__ ) a__ : Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , input_encoded + special_token_id ) a__ : List[Any] = tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) self.assertTrue(special_token not in decoded ) def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__ : List[Any] = chr(0XE005 ) a__ : str = chr(0XE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCamelCase__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} ) a__ : Dict = tokenizer.tokenize(lowerCamelCase__ ) a__ : int = tokenizer.tokenize(lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) , 1 ) self.assertEqual(len(lowerCamelCase__ ) , 1 ) self.assertEqual(token_a[0] , lowerCamelCase__ ) self.assertEqual(token_a[0] , lowerCamelCase__ ) @require_tokenizers def _UpperCamelCase( self : Tuple ): a__ : Any = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: a__ : List[Any] = 0XE006 a__ : Union[str, Any] = chr(lowerCamelCase__ ) a__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ ) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(lowerCamelCase__ ) tokenizer.from_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: a__ : Dict = json.load(lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: a__ : Tuple = json.load(lowerCamelCase__ ) # a special token for Canine can be defined as follows: a__ : Optional[int] = 0XE006 a__ : Tuple = chr(lowerCamelCase__ ) a__ : List[Any] = [new_token_a] a__ : Optional[Any] = [new_token_a] with open(os.path.join(lowerCamelCase__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(lowerCamelCase__ , lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(lowerCamelCase__ , lowerCamelCase__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files a__ : Any = tokenizer_class.from_pretrained(lowerCamelCase__ , extra_ids=0 ) self.assertIn(lowerCamelCase__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) a__ : Optional[Any] = 0XE007 a__ : str = chr(lowerCamelCase__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained a__ : List[str] = [AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ )] a__ : List[str] = tokenizer_class.from_pretrained( lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , extra_ids=0 ) self.assertIn(lowerCamelCase__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def _UpperCamelCase( self : Union[str, Any] ): a__ : int = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__ : Optional[Any] = "hello world" if self.space_between_special_tokens: a__ : Any = "[CLS] hello world [SEP]" else: a__ : str = input a__ : Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Dict = tokenizer.decode(lowerCamelCase__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(lowerCamelCase__ , [output, output.lower()] ) def _UpperCamelCase( self : Dict ): a__ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__ : int = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] a__ : Optional[Any] = "a" a__ : Optional[Any] = ord(lowerCamelCase__ ) for attr in attributes_list: setattr(lowerCamelCase__ , attr + "_id" , lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ , attr + "_id" ) , lowerCamelCase__ ) setattr(lowerCamelCase__ , attr + "_id" , lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ , attr + "_id" ) , lowerCamelCase__ ) setattr(lowerCamelCase__ , "additional_special_tokens_ids" , [] ) self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens" ) , [] ) self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens_ids" ) , [] ) a__ : List[Any] = 0XE006 a__ : Dict = chr(lowerCamelCase__ ) setattr(lowerCamelCase__ , "additional_special_tokens_ids" , [additional_special_token_id] ) self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens" ) , [additional_special_token] ) self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens_ids" ) , [additional_special_token_id] ) def _UpperCamelCase( self : str ): pass def _UpperCamelCase( self : int ): pass def _UpperCamelCase( self : Any ): pass def _UpperCamelCase( self : Any ): pass def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : List[str] ): pass 
def _UpperCamelCase( self : List[Any] ): pass def _UpperCamelCase( self : Optional[int] ): pass
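# Usage sketch (not part of the test file above): the expected ids in these tests
# come from CANINE's vocabulary-free design -- each character is encoded as its
# Unicode code point, framed by [CLS] (0xE000 = 57344) and [SEP] (0xE001 = 57345).
# Assumes the `google/canine-s` checkpoint is reachable.
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
print(tokenizer("Life")["input_ids"])  # [57344, 76, 105, 102, 101, 57345]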
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __A (__magic_name__ ): def __get__( self , UpperCamelCase_ , UpperCamelCase_=None ): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) __UpperCAmelCase : List[str] = "__cached_" + self.fget.__name__ __UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if cached is None: __UpperCAmelCase : List[str] = self.fget(UpperCamelCase_ ) setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return cached def _lowercase ( lowerCamelCase__ ) -> List[str]: """simple docstring""" __UpperCAmelCase : Union[str, Any] = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f"""invalid truth value {val!r}""" ) def _lowercase ( lowerCamelCase__ ) -> Any: """simple docstring""" if is_torch_fx_proxy(lowerCamelCase__ ): return True if is_torch_available(): import torch if isinstance(lowerCamelCase__ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(lowerCamelCase__ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(lowerCamelCase__ , (jnp.ndarray, Tracer) ): return True return isinstance(lowerCamelCase__ , np.ndarray ) def _lowercase ( lowerCamelCase__ ) -> List[str]: """simple docstring""" return isinstance(lowerCamelCase__ , np.ndarray ) def _lowercase ( lowerCamelCase__ ) -> Tuple: """simple docstring""" return _is_numpy(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ ) -> Tuple: """simple docstring""" import torch return isinstance(lowerCamelCase__ , torch.Tensor ) def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" return False if not is_torch_available() else _is_torch(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ ) -> Tuple: """simple docstring""" import torch return isinstance(lowerCamelCase__ , torch.device ) def _lowercase ( lowerCamelCase__ ) -> int: """simple docstring""" return False if not is_torch_available() else _is_torch_device(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ ) -> List[Any]: """simple docstring""" import torch if isinstance(lowerCamelCase__ , lowerCamelCase__ ): if hasattr(lowerCamelCase__ , lowerCamelCase__ ): __UpperCAmelCase : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ ) else: return False return isinstance(lowerCamelCase__ , torch.dtype ) def _lowercase ( lowerCamelCase__ ) -> List[Any]: """simple docstring""" return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ ) -> Optional[int]: """simple docstring""" import tensorflow as tf return isinstance(lowerCamelCase__ , tf.Tensor ) def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]: """simple docstring""" return False if not is_tf_available() else _is_tensorflow(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ ) -> Optional[int]: """simple docstring""" import tensorflow as tf # the `is_symbolic_tensor` 
predicate is only available starting with TF 2.14 if hasattr(lowerCamelCase__ , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(lowerCamelCase__ ) return type(lowerCamelCase__ ) == tf.Tensor def _lowercase ( lowerCamelCase__ ) -> Tuple: """simple docstring""" return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ ) -> List[str]: """simple docstring""" import jax.numpy as jnp # noqa: F811 return isinstance(lowerCamelCase__ , jnp.ndarray ) def _lowercase ( lowerCamelCase__ ) -> Dict: """simple docstring""" return False if not is_flax_available() else _is_jax(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" if isinstance(lowerCamelCase__ , (dict, UserDict) ): return {k: to_py_obj(lowerCamelCase__ ) for k, v in obj.items()} elif isinstance(lowerCamelCase__ , (list, tuple) ): return [to_py_obj(lowerCamelCase__ ) for o in obj] elif is_tf_tensor(lowerCamelCase__ ): return obj.numpy().tolist() elif is_torch_tensor(lowerCamelCase__ ): return obj.detach().cpu().tolist() elif is_jax_tensor(lowerCamelCase__ ): return np.asarray(lowerCamelCase__ ).tolist() elif isinstance(lowerCamelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def _lowercase ( lowerCamelCase__ ) -> str: """simple docstring""" if isinstance(lowerCamelCase__ , (dict, UserDict) ): return {k: to_numpy(lowerCamelCase__ ) for k, v in obj.items()} elif isinstance(lowerCamelCase__ , (list, tuple) ): return np.array(lowerCamelCase__ ) elif is_tf_tensor(lowerCamelCase__ ): return obj.numpy() elif is_torch_tensor(lowerCamelCase__ ): return obj.detach().cpu().numpy() elif is_jax_tensor(lowerCamelCase__ ): return np.asarray(lowerCamelCase__ ) else: return obj class __A (__magic_name__ ): def _snake_case ( self ): __UpperCAmelCase : Any = fields(self ) # Safety and consistency checks if not len(UpperCamelCase_ ): raise ValueError(f"""{self.__class__.__name__} has no fields.""" ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" ) __UpperCAmelCase : Dict = getattr(self , class_fields[0].name ) __UpperCAmelCase : Union[str, Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(UpperCamelCase_ ): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : str = first_field.items() __UpperCAmelCase : Union[str, Any] = True else: try: __UpperCAmelCase : Optional[int] = iter(UpperCamelCase_ ) __UpperCAmelCase : Dict = True except TypeError: __UpperCAmelCase : Union[str, Any] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(UpperCamelCase_ ): if ( not isinstance(UpperCamelCase_ , (list, tuple) ) or not len(UpperCamelCase_ ) == 2 or not isinstance(element[0] , UpperCamelCase_ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute __UpperCAmelCase : Union[str, Any] = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"""Cannot set key/value for {element}. 
It needs to be a tuple (key, value).""" ) break setattr(self , element[0] , element[1] ) if element[1] is not None: __UpperCAmelCase : List[str] = element[1] elif first_field is not None: __UpperCAmelCase : Optional[int] = first_field else: for field in class_fields: __UpperCAmelCase : Any = getattr(self , field.name ) if v is not None: __UpperCAmelCase : Union[str, Any] = v def __delitem__( self , *UpperCamelCase_ , **UpperCamelCase_ ): raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" ) def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" ) def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" ) def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" ) def __getitem__( self , UpperCamelCase_ ): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __UpperCAmelCase : List[str] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , UpperCamelCase_ , UpperCamelCase_ ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(UpperCamelCase_ , UpperCamelCase_ ) super().__setattr__(UpperCamelCase_ , UpperCamelCase_ ) def __setitem__( self , UpperCamelCase_ , UpperCamelCase_ ): # Will raise a KeyException if needed super().__setitem__(UpperCamelCase_ , UpperCamelCase_ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(UpperCamelCase_ , UpperCamelCase_ ) def _snake_case ( self ): return tuple(self[k] for k in self.keys() ) class __A (__magic_name__ , __magic_name__ ): @classmethod def _snake_case ( cls , UpperCamelCase_ ): raise ValueError( f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" ) class __A (__magic_name__ ): snake_case :Dict = "longest" snake_case :Dict = "max_length" snake_case :Union[str, Any] = "do_not_pad" class __A (__magic_name__ ): snake_case :Union[str, Any] = "pt" snake_case :List[str] = "tf" snake_case :Any = "np" snake_case :Union[str, Any] = "jax" class __A : def __init__( self , UpperCamelCase_ ): __UpperCAmelCase : Dict = context_managers __UpperCAmelCase : str = ExitStack() def __enter__( self ): for context_manager in self.context_managers: self.stack.enter_context(UpperCamelCase_ ) def __exit__( self , *UpperCamelCase_ , **UpperCamelCase_ ): self.stack.__exit__(*UpperCamelCase_ , **UpperCamelCase_ ) def _lowercase ( lowerCamelCase__ ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = infer_framework(lowerCamelCase__ ) if framework == "tf": __UpperCAmelCase : Any = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __UpperCAmelCase : List[str] = inspect.signature(model_class.forward ) # PyTorch models else: __UpperCAmelCase : List[str] = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def _lowercase ( lowerCamelCase__ ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = model_class.__name__ __UpperCAmelCase : List[str] = infer_framework(lowerCamelCase__ ) if framework == "tf": __UpperCAmelCase : Optional[Any] = inspect.signature(model_class.call ) # 
TensorFlow models elif framework == "pt": __UpperCAmelCase : Tuple = inspect.signature(model_class.forward ) # PyTorch models else: __UpperCAmelCase : List[Any] = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = "" , lowerCamelCase__ = "." ) -> Optional[Any]: """simple docstring""" def _flatten_dict(lowerCamelCase__ , lowerCamelCase__="" , lowerCamelCase__="." ): for k, v in d.items(): __UpperCAmelCase : Union[str, Any] = str(lowerCamelCase__ ) + delimiter + str(lowerCamelCase__ ) if parent_key else k if v and isinstance(lowerCamelCase__ , lowerCamelCase__ ): yield from flatten_dict(lowerCamelCase__ , lowerCamelCase__ , delimiter=lowerCamelCase__ ).items() else: yield key, v return dict(_flatten_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ) @contextmanager def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = False ) -> Union[str, Any]: """simple docstring""" if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def _lowercase ( lowerCamelCase__ , lowerCamelCase__=None ) -> str: """simple docstring""" if is_numpy_array(lowerCamelCase__ ): return np.transpose(lowerCamelCase__ , axes=lowerCamelCase__ ) elif is_torch_tensor(lowerCamelCase__ ): return array.T if axes is None else array.permute(*lowerCamelCase__ ) elif is_tf_tensor(lowerCamelCase__ ): import tensorflow as tf return tf.transpose(lowerCamelCase__ , perm=lowerCamelCase__ ) elif is_jax_tensor(lowerCamelCase__ ): return jnp.transpose(lowerCamelCase__ , axes=lowerCamelCase__ ) else: raise ValueError(f"""Type not supported for transpose: {type(lowerCamelCase__ )}.""" ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: """simple docstring""" if is_numpy_array(lowerCamelCase__ ): return np.reshape(lowerCamelCase__ , lowerCamelCase__ ) elif is_torch_tensor(lowerCamelCase__ ): return array.reshape(*lowerCamelCase__ ) elif is_tf_tensor(lowerCamelCase__ ): import tensorflow as tf return tf.reshape(lowerCamelCase__ , lowerCamelCase__ ) elif is_jax_tensor(lowerCamelCase__ ): return jnp.reshape(lowerCamelCase__ , lowerCamelCase__ ) else: raise ValueError(f"""Type not supported for reshape: {type(lowerCamelCase__ )}.""" ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[int]: """simple docstring""" if is_numpy_array(lowerCamelCase__ ): return np.squeeze(lowerCamelCase__ , axis=lowerCamelCase__ ) elif is_torch_tensor(lowerCamelCase__ ): return array.squeeze() if axis is None else array.squeeze(dim=lowerCamelCase__ ) elif is_tf_tensor(lowerCamelCase__ ): import tensorflow as tf return tf.squeeze(lowerCamelCase__ , axis=lowerCamelCase__ ) elif is_jax_tensor(lowerCamelCase__ ): return jnp.squeeze(lowerCamelCase__ , axis=lowerCamelCase__ ) else: raise ValueError(f"""Type not supported for squeeze: {type(lowerCamelCase__ )}.""" ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int: """simple docstring""" if is_numpy_array(lowerCamelCase__ ): return np.expand_dims(lowerCamelCase__ , lowerCamelCase__ ) elif is_torch_tensor(lowerCamelCase__ ): return array.unsqueeze(dim=lowerCamelCase__ ) elif is_tf_tensor(lowerCamelCase__ ): import tensorflow as tf return tf.expand_dims(lowerCamelCase__ , axis=lowerCamelCase__ ) elif is_jax_tensor(lowerCamelCase__ ): return 
jnp.expand_dims(lowerCamelCase__ , axis=lowerCamelCase__ ) else: raise ValueError(f"""Type not supported for expand_dims: {type(lowerCamelCase__ )}.""" ) def _lowercase ( lowerCamelCase__ ) -> int: """simple docstring""" if is_numpy_array(lowerCamelCase__ ): return np.size(lowerCamelCase__ ) elif is_torch_tensor(lowerCamelCase__ ): return array.numel() elif is_tf_tensor(lowerCamelCase__ ): import tensorflow as tf return tf.size(lowerCamelCase__ ) elif is_jax_tensor(lowerCamelCase__ ): return array.size else: raise ValueError(f"""Type not supported for expand_dims: {type(lowerCamelCase__ )}.""" ) def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Any: """simple docstring""" for key, value in auto_map.items(): if isinstance(lowerCamelCase__ , (tuple, list) ): __UpperCAmelCase : List[str] = [f"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: __UpperCAmelCase : int = f"""{repo_id}--{value}""" return auto_map def _lowercase ( lowerCamelCase__ ) -> List[str]: """simple docstring""" for base_class in inspect.getmro(lowerCamelCase__ ): __UpperCAmelCase : Tuple = base_class.__module__ __UpperCAmelCase : Union[str, Any] = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f"""Could not infer framework from class {model_class}.""" )
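# Illustration (a sketch, not from the file above): the framework-agnostic helpers
# defined here dispatch on the tensor type at runtime. Assuming recent transformers
# releases still expose them under `transformers.utils.generic`, plain NumPy arrays
# take the `np.*` branch shown in each function.
import numpy as np
from transformers.utils.generic import expand_dims, reshape, squeeze, transpose

x = np.arange(6).reshape(2, 3)
print(transpose(x).shape)                # (3, 2)  -- np.transpose branch
print(reshape(x, (3, 2)).shape)          # (3, 2)
print(expand_dims(x, 0).shape)           # (1, 2, 3)
print(squeeze(expand_dims(x, 0)).shape)  # (2, 3)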
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
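# Usage sketch: instantiating the config above (exposed as `MraConfig` in
# transformers -- an assumption about the installed version) with its defaults,
# which mirror the uw-madison/mra-base-512-4 checkpoint.
from transformers import MraConfig

config = MraConfig()
print(config.model_type)     # "mra"
print(config.block_per_row)  # 4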
from math import factorial


def solution(n: int = 100) -> int:
    # sum of the decimal digits of n!
    return sum(int(digit) for digit in str(factorial(n)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
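# Quick sanity check: 10! = 3628800 and 3+6+2+8+8+0+0 = 27; for the default
# n = 100 this is Project Euler problem 20, whose answer is 648.
from math import factorial

assert sum(int(d) for d in str(factorial(10))) == 27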
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase : Optional[Any] = logging.get_logger(__name__) lowercase : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowercase : Optional[Any] = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } lowercase : Optional[Any] = { "gpt2": 10_24, "gpt2-medium": 10_24, "gpt2-large": 10_24, "gpt2-xl": 10_24, "distilgpt2": 10_24, } class __lowercase ( lowerCamelCase__ ): """simple docstring""" UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES UpperCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : Any = ['''input_ids''', '''attention_mask'''] UpperCAmelCase_ : List[str] = GPTaTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__( a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , ) A : Dict = kwargs.pop('''add_bos_token''' , a__ ) A : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , a__ ) != add_prefix_space: A : List[Any] = getattr(a__ , pre_tok_state.pop('''type''' ) ) A : Dict = add_prefix_space A : str = pre_tok_class(**a__ ) A : int = add_prefix_space def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: A : Union[str, Any] = kwargs.get('''is_split_into_words''' , a__ ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*a__ , **a__ ) def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: A : Optional[Any] = kwargs.get('''is_split_into_words''' , a__ ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*a__ , **a__ ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Optional[int]: A : Union[str, Any] = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ ) def snake_case ( self , __UpperCAmelCase ) -> List[Any]: A : int = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] ) if len(a__ ) > self.model_max_length: A : Dict = input_ids[-self.model_max_length :] return input_ids
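# Usage sketch (names de-obfuscated by hand -- the class above corresponds to
# transformers' GPT2TokenizerFast): pre-tokenized input is only accepted when the
# tokenizer was built with add_prefix_space=True, which is what the asserts in
# _batch_encode_plus/_encode_plus enforce.
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)
print(enc.word_ids())  # [0, 1] -- one GPT-2 token per input word here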
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
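# Usage sketch for the pipeline above (exported from diffusers as LDMPipeline);
# the checkpoint name is an assumption -- any unconditional latent-diffusion
# checkpoint with vqvae/unet/scheduler components should work.
import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
image.save("ldm_sample.png")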
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring''' import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_text_model""" def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,): '''simple docstring''' super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a ) A_ : Tuple = vocab_size A_ : int = hidden_size A_ : Optional[int] = intermediate_size A_ : Optional[int] = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : int = max_position_embeddings A_ : str = hidden_act A_ : Union[str, Any] = layer_norm_eps A_ : Tuple = attention_dropout A_ : Union[str, Any] = initializer_range A_ : List[Any] = initializer_factor @classmethod def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : int = cls.get_config_dict(_a ,**_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_vision_model""" def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,): '''simple docstring''' super().__init__(**_a ) A_ : List[str] = hidden_size A_ : Union[str, Any] = intermediate_size A_ : Union[str, Any] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : int = num_channels A_ : str = image_size A_ : List[Any] = patch_size A_ : int = hidden_act A_ : List[Any] = layer_norm_eps A_ : List[str] = attention_dropout A_ : str = initializer_range A_ : str = initializer_factor @classmethod def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : List[str] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit""" a_ = True def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,): '''simple docstring''' super().__init__(**_a ) if text_config is None: A_ : List[Any] = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) A_ : Dict = OwlViTTextConfig(**_a ) A_ : Dict = OwlViTVisionConfig(**_a ) A_ : Any = projection_dim A_ : Optional[int] = logit_scale_init_value A_ : Optional[int] = return_dict A_ : Dict = 1.0 @classmethod def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a ) if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) @classmethod def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ): '''simple docstring''' A_ : str = {} A_ : int = text_config A_ : Union[str, Any] = vision_config return cls.from_dict(_a ,**_a ) def _a ( self : Optional[int] ): '''simple docstring''' A_ : Dict = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : Optional[int] = self.vision_config.to_dict() A_ : List[Any] = self.__class__.model_type return output class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _a ( self : int ): '''simple docstring''' return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def _a ( self : str ): '''simple docstring''' return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def _a ( self : Optional[Any] ): '''simple docstring''' return 1e-4 def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,): '''simple docstring''' A_ : Any = super().generate_dummy_inputs( processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a ) A_ : Any = super().generate_dummy_inputs( processor.image_processor ,batch_size=_a ,framework=_a ) return {**text_input_dict, **image_input_dict} @property def _a ( self : Optional[Any] ): '''simple docstring''' return 14
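# Usage sketch (the obfuscated classes above are transformers' OwlViTTextConfig,
# OwlViTVisionConfig and OwlViTConfig): a composite config can be built from the
# two sub-configs via the classmethod defined above.
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

config = OwlViTConfig.from_text_vision_configs(OwlViTTextConfig(), OwlViTVisionConfig())
print(config.projection_dim)  # 512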
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]=13 , __UpperCAmelCase : Dict=7 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : int=2 , __UpperCAmelCase : Optional[Any]=99 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : str=32 , __UpperCAmelCase : int=5 , __UpperCAmelCase : str=4 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Any=512 , __UpperCAmelCase : Tuple=12 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : int="last" , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=None , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_lengths _A = use_token_type_ids _A = use_labels _A = gelu_activation _A = sinusoidal_embeddings _A = causal _A = asm _A = n_langs _A = vocab_size _A = n_special _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = summary_type _A = use_proj _A = scope def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_input_lengths: _A = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , 2 ).float() _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , 
attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , ): '''simple docstring''' _A = FlaubertModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _A = model(_SCREAMING_SNAKE_CASE , lengths=_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE ) _A = model(_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE ) _A = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , ): '''simple docstring''' _A = FlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _A = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , ): '''simple docstring''' _A = FlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _A = model(_SCREAMING_SNAKE_CASE ) _A = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , ): '''simple docstring''' _A = FlaubertForQuestionAnswering(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _A = model(_SCREAMING_SNAKE_CASE ) _A = model( _SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , p_mask=_SCREAMING_SNAKE_CASE , ) _A = model( _SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , ) (_A ) = result_with_labels.to_tuple() _A = model(_SCREAMING_SNAKE_CASE , 
start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE ) (_A ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , ): '''simple docstring''' _A = FlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _A = model(_SCREAMING_SNAKE_CASE ) _A = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , ): '''simple docstring''' _A = self.num_labels _A = FlaubertForTokenClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _A = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , ): '''simple docstring''' _A = self.num_choices _A = FlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( _A ) = config_and_inputs _A = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" snake_case = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, 
FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) snake_case = ( { 'feature-extraction': FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int=False ): '''simple docstring''' _A = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": _A = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) _A = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) return inputs_dict def lowerCAmelCase ( self : str ): '''simple docstring''' _A = FlaubertModelTester(self ) _A = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , emb_dim=37 ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : int ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*_SCREAMING_SNAKE_CASE ) @slow def lowerCAmelCase ( self : Tuple ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = FlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @slow @require_torch_gpu def lowerCAmelCase ( self : Optional[Any] ): 
'''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return _A = True _A = model_class(config=_SCREAMING_SNAKE_CASE ) _A = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _A = torch.jit.trace( _SCREAMING_SNAKE_CASE , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , "traced_model.pt" ) ) _A = torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , "traced_model.pt" ) , map_location=_SCREAMING_SNAKE_CASE ) loaded(inputs_dict["input_ids"].to(_SCREAMING_SNAKE_CASE ) , inputs_dict["attention_mask"].to(_SCREAMING_SNAKE_CASE ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) _A = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): _A = model(_SCREAMING_SNAKE_CASE )[0] _A = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) _A = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
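# Usage sketch mirroring the integration test above; assumes the
# flaubert/flaubert_base_cased checkpoint (and the sacremoses dependency of its
# tokenizer) is available.
import torch
from transformers import FlaubertModel, FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
inputs = tokenizer("Le chat mange une pomme.", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)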
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Tuple: snake_case_ : Optional[int] = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def _lowerCAmelCase ( self ) -> str: with self.assertRaises(_SCREAMING_SNAKE_CASE ): snake_case_ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def _lowerCAmelCase ( self ) -> int: with self.assertRaises(_SCREAMING_SNAKE_CASE ): snake_case_ : Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) ) def _lowerCAmelCase ( self ) -> Dict: snake_case_ : Optional[int] = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def _lowerCAmelCase ( self ) -> Tuple: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): snake_case_ : Tuple = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) ) def _lowerCAmelCase ( self ) -> Optional[int]: snake_case_ : int = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def _lowerCAmelCase ( self ) -> List[Any]: snake_case_ : List[Any] = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) ) self.assertEqual(arr.type , pa.string() ) def _lowerCAmelCase ( self ) -> Optional[Any]: snake_case_ : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) ) def _lowerCAmelCase ( self ) -> Tuple: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): snake_case_ : Tuple = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) ) def _lowerCAmelCase ( self ) -> List[Any]: snake_case_ : str = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) ) def _lowerCAmelCase ( self ) -> List[Any]: snake_case_ : Optional[Any] = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def _lowerCAmelCase ( self ) -> Optional[Any]: import PIL.Image snake_case_ : List[Any] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( "datasets.arrow_writer.cast_to_python_objects" , side_effect=_SCREAMING_SNAKE_CASE ) as mock_cast_to_python_objects: snake_case_ : str = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) ) snake_case_ , snake_case_ : List[str] = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("optimize_list_casting" , _SCREAMING_SNAKE_CASE ) self.assertFalse(kwargs["optimize_list_casting"] ) def lowerCAmelCase__ ( _a : int , _a : int ): snake_case_ : str = pa.BufferReader(_a ) if isinstance(_a , pa.Buffer ) else pa.memory_map(_a ) snake_case_ : Optional[int] = pa.ipc.open_stream(_a ) snake_case_ : pa.Table = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks 
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


# InvalidKeyError and DuplicatedKeysError come from `datasets.keyhash` and are
# imported at the top of this test module.
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    # unwrap nested list types down to the primitive value type
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    # descend into nested lists and overwrite the first primitive element
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()


@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
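# A minimal usage sketch of the write -> finalize flow exercised by the tests
# above. Illustrative only; it assumes `datasets` and `pyarrow` are installed
# and is not part of the original test module.
#
#     import pyarrow as pa
#     from datasets.arrow_writer import ArrowWriter
#
#     output = pa.BufferOutputStream()
#     with ArrowWriter(stream=output) as writer:
#         writer.write({"col_1": "foo", "col_2": 1})
#         writer.write({"col_1": "bar", "col_2": 2})
#         num_examples, num_bytes = writer.finalize()
#
#     # the buffer now holds an Arrow IPC stream that can be read back
#     table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
#     assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}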
568
0
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that rescales inputs and symmetrically pads each spatial
    dimension up to the next multiple of `pad_size`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
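# Quick sanity check of the symmetric padding rule used above (a standalone
# sketch, not part of the processor module): each side grows to the next
# multiple of `pad_size`. Note that a dimension already divisible by
# `pad_size` still gains a full extra block, mirroring the
# `(old // size + 1) * size - old` formula.
#
#     size = 8
#     for old in (5, 8, 13):
#         pad = (old // size + 1) * size - old
#         print(old, "->", old + pad)  # 5 -> 8, 8 -> 16, 13 -> 16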
713
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the attribute path, then copy the fairseq tensor into place
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
3
0
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers and returns the Manhattan (taxicab)
    distance between them.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 2], [3, 4])
    3.5
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """
    Validates that the input is a non-empty list of numbers.
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Same as `manhattan_distance`, written as a single expression.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
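# Worked examples for the helpers above (values chosen here for illustration;
# not part of the original module):
#
#     manhattan_distance([1, 1], [2, 2])              -> 2.0  (|1-2| + |1-2|)
#     manhattan_distance_one_liner([1.5, 2], [3, 4])  -> 3.5  (1.5 + 2.0)
#     manhattan_distance([1, 1], [2, 2, 2])           -> ValueError (dimension mismatch)
#     manhattan_distance([1, "one"], [2, 2])          -> TypeError (non-numeric entry)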
67
"""simple docstring""" import numpy as np def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return 1 / (1 + np.exp(-vector )) def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return vector * sigmoid(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
88
0
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings


RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
356
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex, and also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
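# Illustrative usage sketch (not part of the original module): build a small
# directed graph and print its adjacency list.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int]()
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(1, 3)
    print(graph)  # {0: [1], 1: [2, 3], 2: [], 3: []}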
356
1
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`,
    using bottom-up dynamic programming.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
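# Worked examples (illustrative; by Lagrange's four-square theorem the answer
# is always at most 4):
#
#     minimum_squares_to_represent_a_number(12) -> 3   # 4 + 4 + 4
#     minimum_squares_to_represent_a_number(13) -> 2   # 4 + 9
#     minimum_squares_to_represent_a_number(25) -> 1   # 25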
439
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
439
1
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """
    Probabilistic Miller-Rabin primality test, useful for big numbers.
    If n is prime, it returns True; if n is composite, the chance of it
    returning True is at most 1 / 4**prec.
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d usable by the modular exponentiation below
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
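# Illustrative behaviour (not part of the original module): Carmichael numbers
# such as 561 = 3 * 11 * 17 fool the plain Fermat test, but each Miller-Rabin
# round above rejects a composite with probability at least 3/4, so with the
# default prec=1000, is_prime_big(561) returns False while is_prime_big(563)
# (a prime) returns True.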
167
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """
    Performs a Doolittle LU decomposition (no pivoting) of a square matrix,
    returning the pair (lower, upper) with lower @ upper == table.
    """
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
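# Worked example (illustrative):
#
#     import numpy as np
#     table = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])
#     lower, upper = lower_upper_decomposition(table)
#     np.allclose(lower @ upper, table)  # True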
167
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
370
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]: _UpperCamelCase : Optional[int] = jnp.ones((batch_size, length) ) / length return scores def _lowercase ( self ) -> Optional[int]: _UpperCamelCase : int = None _UpperCamelCase : int = 20 _UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case ) # tweak scores to not be uniform anymore _UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch _UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax _UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 ) _UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 ) _UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 ) _UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 ) _UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def _lowercase ( self ) -> Any: _UpperCamelCase : List[Any] = None _UpperCamelCase : Optional[int] = 10 _UpperCamelCase : Any = 2 # create ramp distribution _UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() _UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size _UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 ) _UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case _UpperCamelCase : Optional[int] = 5 _UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) _UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy() _UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def _lowercase ( self ) -> Optional[int]: _UpperCamelCase : Any = None _UpperCamelCase : Any = 10 
_UpperCamelCase : List[Any] = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) _UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) _UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 ) _UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 _UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) ) # check edge cases with negative and extreme logits _UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme _UpperCamelCase : Tuple = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept _UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) _UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def _lowercase ( self ) -> Dict: _UpperCamelCase : List[Any] = 20 _UpperCamelCase : Optional[int] = 4 _UpperCamelCase : int = 0 _UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case ) # check that min length is applied at length 5 _UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 ) _UpperCamelCase : int = 5 _UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case ) _UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] ) # check that min length is not applied anymore at length 15 _UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case ) _UpperCamelCase : Optional[Any] = 15 _UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case ) self.assertFalse(jnp.isinf(_snake_case ).any() ) def _lowercase ( self ) -> List[Any]: _UpperCamelCase : Optional[int] = 20 _UpperCamelCase : Union[str, Any] = 4 _UpperCamelCase : List[Any] = 0 _UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case ) # check that all scores are -inf except the bos_token_id score _UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 ) _UpperCamelCase : Optional[int] = 1 _UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case ) _UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 _UpperCamelCase : List[str] = 3 _UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case ) _UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case ) self.assertFalse(jnp.isinf(_snake_case ).any() ) def _lowercase ( self ) -> str: _UpperCamelCase : Dict = 20 _UpperCamelCase : Tuple = 4 _UpperCamelCase : Any = 0 _UpperCamelCase : str = 5 _UpperCamelCase : Tuple = 
FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case ) # check that all scores are -inf except the eos_token_id when max_length is reached _UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 ) _UpperCamelCase : Dict = 4 _UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case ) _UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached _UpperCamelCase : Optional[int] = 3 _UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case ) _UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case ) self.assertFalse(jnp.isinf(_snake_case ).any() ) def _lowercase ( self ) -> str: _UpperCamelCase : Dict = 4 _UpperCamelCase : Optional[Any] = 10 _UpperCamelCase : Dict = 15 _UpperCamelCase : Union[str, Any] = 2 _UpperCamelCase : Optional[Any] = 1 _UpperCamelCase : List[Any] = 15 # dummy input_ids and scores _UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case ) _UpperCamelCase : Any = input_ids.copy() _UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case ) _UpperCamelCase : List[str] = scores.copy() # instantiate all dist processors _UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 ) _UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 ) _UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors _UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case ) _UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case ) _UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case ) _UpperCamelCase : List[str] = 10 # no processor list _UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case ) # with processor list _UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) _UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case ) # scores should be equal self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def _lowercase ( self ) -> Tuple: _UpperCamelCase : Tuple = 4 _UpperCamelCase : int = 10 _UpperCamelCase : List[Any] = 15 _UpperCamelCase : Dict = 2 _UpperCamelCase : Tuple = 1 _UpperCamelCase : Optional[int] = 15 # dummy input_ids and scores _UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case ) _UpperCamelCase : Optional[Any] = input_ids.copy() _UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case 
) _UpperCamelCase : Optional[int] = scores.copy() # instantiate all dist processors _UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 ) _UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 ) _UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors _UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case ) _UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case ) _UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case ) _UpperCamelCase : Union[str, Any] = 10 # no processor list def run_no_processor_list(_snake_case , _snake_case , _snake_case ): _UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case ) _UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case ) return scores # with processor list def run_processor_list(_snake_case , _snake_case , _snake_case ): _UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) _UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case ) return scores _UpperCamelCase : Dict = jax.jit(_snake_case ) _UpperCamelCase : Optional[int] = jax.jit(_snake_case ) _UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case ) _UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case ) # scores should be equal self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
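# A standalone hedged sketch of the composition the tests above verify: warpers and
# processors chain through FlaxLogitsProcessorList. Shapes and values are made up,
# and the top-level imports assume a flax-enabled transformers install.
import jax.numpy as jnp
from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

_ids = jnp.zeros((1, 4), dtype=jnp.int32)  # dummy prompt of length 4
_scores = jnp.ones((1, 20))                # uniform logits over a 20-token vocab
_chain = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.5), FlaxTopKLogitsWarper(3)])
_warped = _chain(_ids, _scores, cur_len=4)
print(_warped.shape)  # (1, 20); entries outside the top-3 are pushed toward -inf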
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
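# A minimal usage sketch, assuming the transformers agents/tools runtime can
# download the BLIP checkpoint; "photo.jpg" is a hypothetical local file.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageCaptioningTool()
    caption = tool(Image.open("photo.jpg"))  # model/processor loading happens lazily on first call
    print(caption)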
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in metres between two points on Earth,
    using the haversine formula on a WGS84-flattened sphere.
    """
    # Constants per WGS84: https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Convert geodetic latitudes to reduced (parametric) latitudes
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Haversine equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
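# A hedged, worked example for the function above. The coordinates are
# illustrative; the function returns metres, and this pair comes out around 254 km.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.1f} km")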
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )

            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
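# A short hedged sketch of the two construction paths above: hybrid mode builds a
# default BiT backbone config when none is given, plain ViT mode leaves it None.
# (The same class is importable upstream as transformers.DPTConfig.)
hybrid = DPTConfig(is_hybrid=True)
print(type(hybrid.backbone_config).__name__)  # BitConfig
plain = DPTConfig()
print(plain.backbone_config, plain.to_dict()["readout_type"])  # None project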
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __lowerCamelCase : Optional[int] = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : """simple docstring""" def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict: '''simple docstring''' snake_case_ : List[str] = d_model snake_case_ : Dict = parent snake_case_ : Optional[Any] = batch_size snake_case_ : Optional[Any] = prediction_length snake_case_ : str = context_length snake_case_ : Tuple = cardinality snake_case_ : List[str] = num_time_features snake_case_ : Optional[Any] = lags_sequence snake_case_ : Union[str, Any] = embedding_dimension snake_case_ : Optional[Any] = is_training snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = num_hidden_layers snake_case_ : Optional[Any] = num_attention_heads snake_case_ : int = intermediate_size snake_case_ : Any = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : List[str] = context_length snake_case_ : Any = prediction_length + label_length snake_case_ : Union[str, Any] = label_length snake_case_ : List[Any] = moving_average snake_case_ : str = autocorrelation_factor def _A ( self :List[Any] ) -> Any: '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' snake_case_ : Any = config.context_length + max(config.lags_sequence ) snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) snake_case_ : 
List[Any] = floats_tensor([self.batch_size, _past_length] ) snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] ) snake_case_ : int = { "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def _A ( self :Dict ) -> Tuple: '''simple docstring''' snake_case_ : str = self.get_config() snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ ) return config, inputs_dict def _A ( self :Optional[int] ) -> Dict: '''simple docstring''' snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval() snake_case_ : Optional[int] = model(**lowerCAmelCase__ ) snake_case_ : Any = outputs.encoder_last_hidden_state snake_case_ : Dict = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[Any] = model.get_encoder() encoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ ) snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) snake_case_ : List[Any] = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) snake_case_ : Any = ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) snake_case_ : List[str] = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) snake_case_ : Optional[Any] = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) snake_case_ : Any = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = model.get_decoder() decoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_ : Tuple = decoder( trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () a__ = (AutoformerForPrediction,) if is_torch_available() else () a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False a__ = False a__ = False def _A ( self :Dict ) -> int: '''simple docstring''' snake_case_ : Tuple = AutoformerModelTester(self ) snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def _A ( self :List[str] ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: snake_case_ : List[Any] = model_class(lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ ) self.assertEqual(info["missing_keys"] , [] ) def _A ( self :Optional[int] ) -> Tuple: '''simple docstring''' snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ ) @unittest.skip(reason="Model has no tokens embeddings" ) def _A ( self :str ) -> str: '''simple docstring''' pass def _A ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) ) # The main input is the name of the argument after `self` snake_case_ : Dict = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(lowerCAmelCase__ ) snake_case_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Optional[Any] = [*signature.parameters.keys()] snake_case_ : Dict = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", 
"future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ ) def _A ( self :int ) -> Any: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Union[str, Any] = True snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ ) snake_case_ : Optional[int] = d_model // num_attention_heads for model_class in self.all_model_classes: snake_case_ : Any = True snake_case_ : Any = False snake_case_ : Dict = True snake_case_ : List[str] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ : Optional[int] = True snake_case_ : Any = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : str = outputs.encoder_attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) snake_case_ : Tuple = len(lowerCAmelCase__ ) snake_case_ : List[str] = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # decoder attentions snake_case_ : Optional[int] = outputs.decoder_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions snake_case_ : List[Any] = outputs.cross_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine snake_case_ : Optional[int] = True snake_case_ : List[Any] = True snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ ) 
model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) ) snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _A ( self :Any ) -> Optional[Any]: '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int: """simple docstring""" snake_case_ : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" ) snake_case_ : List[str] = torch.load(__magic_name__ ,map_location=__magic_name__ ) return batch @require_torch @slow class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :str ) -> Any: '''simple docstring''' snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : List[str] = prepare_batch() with torch.no_grad(): snake_case_ : int = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] snake_case_ : Optional[int] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Optional[Any] = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Tuple = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Any = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :List[str] ) -> Any: '''simple docstring''' snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : str = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Optional[Any] = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , 
past_observed_mask=batch["past_observed_mask"] , ) snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ ) snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ ) snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
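# Hedged sketch distilling the integration tests above: loading the public
# checkpoint (network required) is enough to inspect the forecasting horizon;
# a full forecast additionally needs a batch shaped like prepare_batch() returns.
def load_autoformer_example():
    from transformers import AutoformerForPrediction

    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
    return model.config.context_length, model.config.prediction_length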
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance-preserving (VP) stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
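# Hedged usage sketch for the scheduler above: random tensors stand in for a score
# model's prediction, so the trajectory is meaningless but exercises the API.
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        score = torch.randn_like(sample)  # a trained model would predict this
        sample, sample_mean = scheduler.step_pred(score, sample, t, generator=generator)
    print(sample.shape)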
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for the amortisation amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of monthly payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
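# A hedged, worked example: 25,000 borrowed at 12% per annum (passed as the
# decimal fraction 0.12, matching the formula above) over 3 years.
print(equated_monthly_installments(25000, 0.12, 3))  # ~830.36 per month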
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a__ : str = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> YolosConfig: """simple docstring""" UpperCAmelCase = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase = 192 UpperCAmelCase = 768 UpperCAmelCase = 12 UpperCAmelCase = 3 UpperCAmelCase = [800, 1_333] UpperCAmelCase = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase = 330 UpperCAmelCase = 14 UpperCAmelCase = 6 UpperCAmelCase = 1_320 elif "yolos_s" in yolos_name: UpperCAmelCase = 384 UpperCAmelCase = 1_536 UpperCAmelCase = 12 UpperCAmelCase = 6 elif "yolos_b" in yolos_name: UpperCAmelCase = [800, 1_344] UpperCAmelCase = 91 UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''coco-detection-id2label.json''' UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosConfig , SCREAMING_SNAKE_CASE_ : bool = False ) -> str: """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase = in_proj_weight[: config.hidden_size, :] UpperCAmelCase = in_proj_bias[: config.hidden_size] UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" if "backbone" in name: UpperCAmelCase = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: UpperCAmelCase = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: UpperCAmelCase = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCAmelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" 
in name: UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: UpperCAmelCase = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: UpperCAmelCase = name.replace('''bbox_embed''' , '''bbox_predictor''' ) if "vit.norm" in name: UpperCAmelCase = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosForObjectDetection ) -> dict: """simple docstring""" for key in orig_state_dict.copy().keys(): UpperCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "qkv" in key: UpperCAmelCase = key.split('''.''' ) UpperCAmelCase = int(key_split[2] ) UpperCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[ dim : dim * 2, : ] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[dim : dim * 2] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = val return orig_state_dict def __snake_case ( ) -> torch.Tensor: """simple docstring""" UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ) -> str: """simple docstring""" UpperCAmelCase = get_yolos_config(SCREAMING_SNAKE_CASE_ ) # load original state_dict UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model'''] # load 🤗 model UpperCAmelCase = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase = 800 if yolos_name != '''yolos_ti''' else 512 UpperCAmelCase = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase, UpperCAmelCase = outputs.logits, outputs.pred_boxes UpperCAmelCase, UpperCAmelCase = None, None if yolos_name == "yolos_ti": UpperCAmelCase = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) UpperCAmelCase = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) UpperCAmelCase = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) UpperCAmelCase = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) UpperCAmelCase = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": UpperCAmelCase = torch.tensor( [[-40.6064, 
-24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) UpperCAmelCase = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: UpperCAmelCase = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) UpperCAmelCase = model_mapping[yolos_name] image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) a__ : Optional[Any] = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
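# Hedged sketch of the end state of this conversion script: checkpoints pushed by
# it live under the "hustvl" org and load directly with the classes used above
# (network required; the COCO image URL is the same one the script tests with).
def load_converted_yolos_example():
    import requests
    from PIL import Image

    from transformers import YolosForObjectDetection, YolosImageProcessor

    processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
    model = YolosForObjectDetection.from_pretrained("hustvl/yolos-tiny")
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    return model(**inputs).logits.shape  # (batch, num_detection_tokens, num_labels + 1)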
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    # Normality = molarity * n-factor, where molarity = moles / volume (litres)
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    # Ideal gas law solved for pressure: P = nRT / V (R = 0.0821 L*atm/(mol*K))
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    # Ideal gas law solved for volume: V = nRT / P
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    # Ideal gas law solved for temperature: T = PV / nR
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
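# Hedged, worked examples for the helpers above (values check out by hand):
print(molarity_to_normality(2, 3.1, 0.31))             # round(10.0 * 2) = 20
print(moles_to_pressure(0.82, 3, 300))                 # round(73.89 / 0.82) = 90
print(moles_to_volume(0.82, 3, 300))                   # round(73.89 / 0.82) = 90
print(pressure_and_volume_to_temperature(0.82, 1, 2))  # round(1.64 / 0.0821) = 20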
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
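# Hedged sketch of the launcher the CPU tests rely on: debug_launcher runs a
# function under a simulated multi-process setup without real GPU workers.
def _probe():
    print("hello from a launched process")


if __name__ == "__main__":
    debug_launcher(_probe, num_processes=2)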
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
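# Hedged sketch of the pipeline API exercised above, outside unittest; checkpoint,
# prompt and device are illustrative and require a CUDA GPU plus k-diffusion.
def run_kdiffusion_example():
    pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to("cuda")
    pipe.set_scheduler("sample_dpmpp_2m")  # any k-diffusion sampler name
    return pipe(
        "A painting of a squirrel eating a burger",
        generator=torch.manual_seed(0),
        num_inference_steps=15,
        use_karras_sigmas=True,
    ).images[0]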
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__: Dict = logging.get_logger(__name__) lowerCAmelCase__: Dict = { "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class snake_case_ ( A__ ): __lowerCamelCase : Union[str, Any] = """wavlm""" def __init__( self , __lowerCAmelCase=32 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3_072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1e-5 , __lowerCAmelCase="group" , __lowerCAmelCase="gelu" , __lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase=False , __lowerCAmelCase=128 , __lowerCAmelCase=16 , __lowerCAmelCase=320 , __lowerCAmelCase=800 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.05 , __lowerCAmelCase=10 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0 , __lowerCAmelCase=10 , __lowerCAmelCase=320 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , __lowerCAmelCase=100 , __lowerCAmelCase=256 , __lowerCAmelCase=256 , __lowerCAmelCase=0.1 , __lowerCAmelCase="mean" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=256 , __lowerCAmelCase=(512, 512, 512, 512, 1_500) , __lowerCAmelCase=(5, 3, 3, 1, 1) , __lowerCAmelCase=(1, 2, 3, 1, 1) , __lowerCAmelCase=512 , __lowerCAmelCase=80 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=3 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=None , **__lowerCAmelCase , ): super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : Dict = feat_extract_norm SCREAMING_SNAKE_CASE_ : List[str] = feat_extract_activation SCREAMING_SNAKE_CASE_ : Optional[int] = list(_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] = list(_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : Optional[int] = list(_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : Tuple = conv_bias SCREAMING_SNAKE_CASE_ : Dict = num_buckets SCREAMING_SNAKE_CASE_ : Any = max_bucket_distance SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_conv_pos_embeddings SCREAMING_SNAKE_CASE_ : Tuple = num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(self.conv_dim ) SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : Dict = num_attention_heads SCREAMING_SNAKE_CASE_ : List[Any] = hidden_dropout SCREAMING_SNAKE_CASE_ : List[str] = attention_dropout SCREAMING_SNAKE_CASE_ : List[str] = activation_dropout SCREAMING_SNAKE_CASE_ : Tuple = feat_proj_dropout SCREAMING_SNAKE_CASE_ : Dict = final_dropout SCREAMING_SNAKE_CASE_ : Optional[Any] = layerdrop SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm_eps SCREAMING_SNAKE_CASE_ : Any = initializer_range SCREAMING_SNAKE_CASE_ : List[str] = num_ctc_classes SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : str = do_stable_layer_norm SCREAMING_SNAKE_CASE_ : Tuple = use_weighted_layer_sum SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE_ : str = apply_spec_augment SCREAMING_SNAKE_CASE_ : List[str] = mask_time_prob SCREAMING_SNAKE_CASE_ : Optional[int] = mask_time_length SCREAMING_SNAKE_CASE_ : Any = mask_time_min_masks SCREAMING_SNAKE_CASE_ : Any = mask_feature_prob SCREAMING_SNAKE_CASE_ : Dict = mask_feature_length # parameters for pretraining with codevector quantized representations SCREAMING_SNAKE_CASE_ : Any = num_codevectors_per_group SCREAMING_SNAKE_CASE_ : Optional[Any] = num_codevector_groups SCREAMING_SNAKE_CASE_ : Optional[int] = contrastive_logits_temperature SCREAMING_SNAKE_CASE_ : Dict = num_negatives SCREAMING_SNAKE_CASE_ : Dict = codevector_dim SCREAMING_SNAKE_CASE_ : Dict = proj_codevector_dim SCREAMING_SNAKE_CASE_ : List[Any] = diversity_loss_weight # ctc loss SCREAMING_SNAKE_CASE_ : Any = ctc_loss_reduction SCREAMING_SNAKE_CASE_ : Dict = ctc_zero_infinity # adapter SCREAMING_SNAKE_CASE_ : Optional[int] = add_adapter SCREAMING_SNAKE_CASE_ : str = adapter_kernel_size SCREAMING_SNAKE_CASE_ : List[str] = adapter_stride SCREAMING_SNAKE_CASE_ : List[str] = num_adapter_layers SCREAMING_SNAKE_CASE_ : Dict = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE_ : Tuple = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE_ : List[Any] = list(_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : str = list(_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : Optional[int] = list(_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : Optional[int] = xvector_output_dim @property def __A ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
345
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
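# Hedged usage sketch: the template maps whatever column holds raw text onto the
# canonical "text" name; "content" is an illustrative column name.
_example_task = LanguageModeling(text_column="content")
print(_example_task.column_mapping)  # {'content': 'text'}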
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _snake_case : """simple docstring""" def __init__( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = """""" _SCREAMING_SNAKE_CASE : Tuple = """""" _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : List[Any] = 0 _SCREAMING_SNAKE_CASE : str = 2_5_6 _SCREAMING_SNAKE_CASE : int = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : int = 0 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 def _lowerCAmelCase ( self : Dict , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = cva.imread(_a , 0) _SCREAMING_SNAKE_CASE : List[str] = copy.deepcopy(self.img) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""") _SCREAMING_SNAKE_CASE : Optional[int] = np.sum(_a) for i in range(len(_a)): _SCREAMING_SNAKE_CASE : int = x[i] / self.k self.sk += prk _SCREAMING_SNAKE_CASE : Any = (self.L - 1) * self.sk if self.rem != 0: _SCREAMING_SNAKE_CASE : int = int(last % last) _SCREAMING_SNAKE_CASE : List[Any] = int(last + 1 if self.rem >= 0.5 else last) self.last_list.append(_a) _SCREAMING_SNAKE_CASE : Any = int(np.ma.count(self.img) / self.img[1].size) _SCREAMING_SNAKE_CASE : int = self.img[1].size for i in range(self.number_of_cols): for j in range(self.number_of_rows): _SCREAMING_SNAKE_CASE : List[str] = self.img[j][i] if num != self.last_list[num]: _SCREAMING_SNAKE_CASE : str = self.last_list[num] cva.imwrite("""output_data/output.jpg""" , self.img) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6]) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" cva.imshow("""Output-Image""" , self.img) cva.imshow("""Input-Image""" , self.original_image) cva.waitKey(5_0_0_0) cva.destroyAllWindows() if __name__ == "__main__": lowerCAmelCase_ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') lowerCAmelCase_ = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
0
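# The evaluation script above scores each prediction against every acceptable
# gold answer and keeps the best match. A minimal, self-contained sketch of that
# pattern -- the normalizer below is a hypothetical stand-in for the repo's
# `utils_rag.exact_match_score`, not the actual helper:
def exact_match_score(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Keep the best score over all acceptable gold answers.
    return max(metric_fn(prediction, gt) for gt in ground_truths)


assert metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Lyon"]) == 1.0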
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _lowerCAmelCase ( unittest.TestCase ): @property def _a ( self ) -> str: torch.manual_seed(0 ) _UpperCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def _a ( self ) -> str: _UpperCAmelCase = self.dummy_uncond_unet _UpperCAmelCase = PNDMScheduler() _UpperCAmelCase = PNDMPipeline(unet=a_ , scheduler=a_ ) pndm.to(a_ ) pndm.set_progress_bar_config(disable=a_ ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pndm(generator=a_ , num_inference_steps=20 , output_type="numpy" ).images _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pndm(generator=a_ , num_inference_steps=20 , output_type="numpy" , return_dict=a_ )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class _lowerCAmelCase ( unittest.TestCase ): def _a ( self ) -> List[str]: _UpperCAmelCase = """google/ddpm-cifar10-32""" _UpperCAmelCase = UNetaDModel.from_pretrained(a_ ) _UpperCAmelCase = PNDMScheduler() _UpperCAmelCase = PNDMPipeline(unet=a_ , scheduler=a_ ) pndm.to(a_ ) pndm.set_progress_bar_config(disable=a_ ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pndm(generator=a_ , output_type="numpy" ).images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
657
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset a__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class SCREAMING_SNAKE_CASE_ ( nn.Module ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase : List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__() __UpperCamelCase : Tuple = torchvision.models.resnetaaa(pretrained=lowerCAmelCase ) __UpperCamelCase : Union[str, Any] = list(model.children() )[:-2] __UpperCamelCase : List[str] = nn.Sequential(*lowerCAmelCase ) __UpperCamelCase : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[Any] ) -> List[str]: """simple docstring""" __UpperCamelCase : Dict = self.pool(self.model(lowerCAmelCase ) ) __UpperCamelCase : Dict = torch.flatten(lowerCAmelCase , start_dim=2 ) __UpperCamelCase : Optional[int] = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" __UpperCamelCase : Dict = [json.loads(lowerCAmelCase ) for l in open(lowerCAmelCase )] __UpperCamelCase : Optional[Any] = os.path.dirname(lowerCAmelCase ) __UpperCamelCase : Optional[int] = tokenizer __UpperCamelCase : Dict = labels __UpperCamelCase : int = len(lowerCAmelCase ) __UpperCamelCase : List[str] = max_seq_length __UpperCamelCase : int = transforms def __len__( self : Union[str, Any] ) -> List[Any]: """simple docstring""" return len(self.data ) def __getitem__( self : Optional[Any] , lowerCAmelCase : List[Any] ) -> int: """simple docstring""" __UpperCamelCase : Optional[Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=lowerCAmelCase ) ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Tuple = sentence[0], sentence[1:-1], sentence[-1] __UpperCamelCase : Optional[int] = sentence[: self.max_seq_length] __UpperCamelCase : List[str] = torch.zeros(self.n_classes ) __UpperCamelCase : List[str] = 1 __UpperCamelCase : List[Any] = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCamelCase : Dict = self.transforms(lowerCAmelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __UpperCamelCase : List[Any] = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def A__ (snake_case : int ) -> Optional[int]: __UpperCamelCase : str = [len(row["""sentence"""] ) for row in batch] __UpperCamelCase , __UpperCamelCase : Optional[int] = len(snake_case ), max(snake_case ) __UpperCamelCase : List[str] = torch.zeros(snake_case , snake_case , dtype=torch.long ) __UpperCamelCase : Tuple = torch.zeros(snake_case , snake_case , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(snake_case , snake_case ) ): __UpperCamelCase : Optional[Any] = input_row["""sentence"""] __UpperCamelCase : Any = 1 __UpperCamelCase : List[Any] = torch.stack([row["""image"""] for row in batch] ) 
__UpperCamelCase : Tuple = torch.stack([row["""label"""] for row in batch] ) __UpperCamelCase : Tuple = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCamelCase : List[str] = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def A__ () -> Optional[Any]: return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def A__ () -> Union[str, Any]: return transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ), ] )
279
0
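# The collate function above pads variable-length token rows into one LongTensor
# plus a parallel attention mask. The same padding idea in isolation, assuming
# each row is a 1-D LongTensor (toy data, not the MM-IMDB batch format):
import torch


def pad_text_batch(rows):
    lengths = [len(r) for r in rows]
    bsz, max_len = len(rows), max(lengths)
    text = torch.zeros(bsz, max_len, dtype=torch.long)  # 0 doubles as the pad id
    mask = torch.zeros(bsz, max_len, dtype=torch.long)
    for i, (row, n) in enumerate(zip(rows, lengths)):
        text[i, :n] = row
        mask[i, :n] = 1
    return text, mask


text, mask = pad_text_batch([torch.tensor([5, 6, 7]), torch.tensor([8])])
assert text.shape == (2, 3) and int(mask.sum()) == 4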
lowerCAmelCase__ = range(2, 2_0 + 1) lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)] lowerCAmelCase__ = {} def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple: '''simple docstring''' _UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ) _UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) ) _UpperCamelCase : Dict = 0, 0 _UpperCamelCase : Optional[int] = n - i _UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ ) if sub_memo is not None: _UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ ) if jumps is not None and len(UpperCAmelCase_ ) > 0: # find and make the largest jump without going over _UpperCamelCase : str = -1 for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: _UpperCamelCase : Optional[Any] = _k break if max_jump >= 0: _UpperCamelCase : Optional[Any] = jumps[max_jump] # since the difference between jumps is cached, add c _UpperCamelCase : Tuple = diff + c for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ): _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 ) if new_c > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: _UpperCamelCase : Union[str, Any] = [] else: _UpperCamelCase : List[Any] = {c: []} _UpperCamelCase : Optional[int] = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps _UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead _UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped _UpperCamelCase : List[str] = sub_memo[c] # keep jumps sorted by # of terms skipped _UpperCamelCase : Union[str, Any] = 0 while j < len(UpperCAmelCase_ ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) ) return (diff, dn) def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict: '''simple docstring''' if i >= n: return 0, i if k > len(UpperCAmelCase_ ): a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) _UpperCamelCase : Any = i _UpperCamelCase : Any = 0, 0, 0 for j in range(len(UpperCAmelCase_ ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 _UpperCamelCase : Union[str, Any] = ds_c + ds_b diff += addend _UpperCamelCase : Union[str, Any] = 0 for j in range(UpperCAmelCase_ ): _UpperCamelCase : Union[str, Any] = a_i[j] + addend _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return diff, i - start_i def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict: '''simple docstring''' for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ): _UpperCamelCase : List[str] = digits[j] + addend if s >= 1_0: _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 ) _UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient else: _UpperCamelCase : Dict 
= s _UpperCamelCase : Optional[Any] = addend // 1_0 if addend == 0: break while addend > 0: _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 ) digits.append(UpperCAmelCase_ ) def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int: '''simple docstring''' _UpperCamelCase : Optional[Any] = [1] _UpperCamelCase : Optional[int] = 1 _UpperCamelCase : int = 0 while True: _UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ ) dn += terms_jumped if dn == n - i: break _UpperCamelCase : str = 0 for j in range(len(UpperCAmelCase_ ) ): a_n += digits[j] * 1_0**j return a_n if __name__ == "__main__": print(f'{solution() = }')
715
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = """▁""" lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""} lowerCAmelCase__ = { """vocab_file""": { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model""" ), } } lowerCAmelCase__ = { """xlm-roberta-base""": 5_1_2, """xlm-roberta-large""": 5_1_2, """xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2, """xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2, """xlm-roberta-large-finetuned-conll03-english""": 5_1_2, """xlm-roberta-large-finetuned-conll03-german""": 5_1_2, } class lowercase ( _lowercase ): """simple docstring""" a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ["input_ids", "attention_mask"] def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ): # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token _UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , ) _UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(__snake_case)) _UpperCamelCase : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCamelCase : List[Any] = 1 _UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset _UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self): _UpperCamelCase : List[Any] = self.__dict__.copy() _UpperCamelCase : Optional[Any] = None _UpperCamelCase : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self , __snake_case): _UpperCamelCase : int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): _UpperCamelCase : Tuple = {} _UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def A__ ( self , __snake_case , __snake_case = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase : Tuple = [self.cls_token_id] _UpperCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self , __snake_case , __snake_case = None , __snake_case = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case) if token_ids_a is None: return [1] + ([0] * len(__snake_case)) + [1] return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1] def A__ ( self , __snake_case , __snake_case = None): _UpperCamelCase : Optional[Any] = [self.sep_token_id] _UpperCamelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def A__ ( self): return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token def A__ ( self): _UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def A__ ( self , __snake_case): return self.sp_model.encode(__snake_case , out_type=__snake_case) def A__ ( self , __snake_case): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCamelCase : str = self.sp_model.PieceToId(__snake_case) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A__ ( self , __snake_case): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def A__ ( self , __snake_case): _UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(__snake_case , ' ').strip() return out_string def A__ ( self , __snake_case , __snake_case = None): if not os.path.isdir(__snake_case): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return _UpperCamelCase : str = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __snake_case) elif not os.path.isfile(self.vocab_file): with open(__snake_case , 'wb') as fi: _UpperCamelCase : Any = self.sp_model.serialized_model_proto() fi.write(__snake_case) 
return (out_vocab_file,)
648
0
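# The XLM-RoBERTa tokenizer above shifts every SentencePiece id by a fixed
# fairseq offset so the four control tokens keep their fairseq positions. A toy
# illustration of that remapping -- the control-token table and the offset of 1
# mirror the comment in the code, while `sp_ids` is a made-up stand-in for the
# real SentencePiece model:
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # spm gives "," id 3; fairseq expects 4

sp_ids = {",": 3, ".": 4, "▁": 5}  # hypothetical SentencePiece pieces


def token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    return sp_ids[token] + fairseq_offset


assert token_to_id("<pad>") == 1
assert token_to_id(",") == 4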
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
444
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
307
0
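# The FSNER module above scores each query token against the support set's
# start/end tokens with a matmul followed by a softmax over queries. That
# scoring step in isolation, with tiny random embeddings standing in for BERT
# hidden states:
import torch

q = torch.randn(4, 8)        # 4 query-token embeddings
s_start = torch.randn(3, 8)  # 3 support start-token embeddings

p_start = torch.matmul(q, s_start.T).sum(1).softmax(0)
assert p_start.shape == (4,)
assert torch.allclose(p_start.sum(), torch.tensor(1.0))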
def binary_multiply(a: int, b: int) -> int:
    """Multiply two integers via shift-and-add (Russian peasant multiplication)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Shift-and-add multiplication modulo `modulus`, keeping intermediates small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
435
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Dict: '''simple docstring''' super().__init__() UpperCamelCase : Optional[int] = pad_token_id UpperCamelCase : List[Any] = max_length UpperCamelCase : List[Any] = vocab UpperCamelCase : Tuple = merges UpperCamelCase : int = BytePairTokenizer(lowerCamelCase , lowerCamelCase , sequence_length=lowerCamelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]: '''simple docstring''' UpperCamelCase : List[str] = [" ".join(lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()] UpperCamelCase : List[str] = tokenizer.get_vocab() return cls(lowerCamelCase , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]: '''simple docstring''' UpperCamelCase : List[Any] = GPTaTokenizer.from_pretrained(lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) return cls.from_tokenizer(lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls , lowerCamelCase ) -> Tuple: '''simple docstring''' return cls(**lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[Any]: '''simple docstring''' UpperCamelCase : Any = self.tf_tokenizer(lowerCamelCase ) UpperCamelCase : Tuple = tf.ones_like(lowerCamelCase ) if self.pad_token_id is not None: # pad the tokens up to max length UpperCamelCase : Union[str, Any] = max_length if max_length is not None else self.max_length if max_length is not None: UpperCamelCase , UpperCamelCase : int = pad_model_inputs( lowerCamelCase , max_seq_length=lowerCamelCase , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
435
1
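# Quick sanity checks for the shift-and-add multiply repaired two cells above:
# each set bit of b contributes one doubled copy of a, so 13 * 7 = 13 + 26 + 52.
assert binary_multiply(13, 7) == 13 + 26 + 52 == 91
assert binary_mod_multiply(10**9, 10**9, 97) == (10**9 * 10**9) % 97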
'''simple docstring''' import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict lowercase_ = namedtuple( """_TestCommandArgs""", [ """dataset""", """name""", """cache_dir""", """data_dir""", """all_configs""", """save_infos""", """ignore_verifications""", """force_redownload""", """clear_cache""", ], defaults=[None, None, None, False, False, False, False, False], ) def lowerCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) ->Tuple: return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def lowerCamelCase ( __lowerCamelCase : List[Any] ) ->Union[str, Any]: _SCREAMING_SNAKE_CASE = _TestCommandArgs(dataset=A__ , all_configs=A__ , save_infos=A__ ) _SCREAMING_SNAKE_CASE = TestCommand(*A__ ) test_command.run() _SCREAMING_SNAKE_CASE = os.path.join(A__ , """README.md""" ) assert os.path.exists(A__ ) _SCREAMING_SNAKE_CASE = DatasetInfosDict.from_directory(A__ ) _SCREAMING_SNAKE_CASE = DatasetInfosDict( { """default""": DatasetInfo( features=Features( { """tokens""": Sequence(Value("""string""" ) ), """ner_tags""": Sequence( ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ), """langs""": Sequence(Value("""string""" ) ), """spans""": Sequence(Value("""string""" ) ), } ) , splits=[ { """name""": """train""", """num_bytes""": 235_1563, """num_examples""": 1_0000, }, { """name""": """validation""", """num_bytes""": 23_8418, """num_examples""": 1000, }, ] , download_size=394_0680 , dataset_size=258_9981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = getattr(dataset_infos["""default"""] , A__ ), getattr(expected_dataset_infos["""default"""] , A__ ) if key == "num_bytes": assert is_apercent_close(A__ , A__ ) elif key == "splits": assert list(A__ ) == list(A__ ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
314
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging __magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase ( A__ ): '''simple docstring''' def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> List[Any]: """simple docstring""" super().__init__() if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered''' ''' results in services or applications open to the public. Both the diffusers team and Hugging Face''' ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling''' ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more''' ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' ) self.register_modules( speech_model=_snake_case , speech_processor=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , unet=_snake_case , scheduler=_snake_case , feature_extractor=_snake_case , ) def snake_case_ ( self , _snake_case = "auto" ) -> List[str]: """simple docstring""" if slice_size == "auto": UpperCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_snake_case ) def snake_case_ ( self ) -> Any: """simple docstring""" self.enable_attention_slicing(_snake_case ) @torch.no_grad() def __call__( self , _snake_case , _snake_case=1_6000 , _snake_case = 512 , _snake_case = 512 , _snake_case = 50 , _snake_case = 7.5 , _snake_case = None , _snake_case = 1 , _snake_case = 0.0 , _snake_case = None , _snake_case = None , _snake_case = "pil" , _snake_case = True , _snake_case = None , _snake_case = 1 , **_snake_case , ) -> List[Any]: """simple docstring""" UpperCAmelCase = self.speech_processor.feature_extractor( _snake_case , return_tensors='''pt''' , sampling_rate=_snake_case ).input_features.to(self.device ) UpperCAmelCase = self.speech_model.generate(_snake_case , max_length=48_0000 ) UpperCAmelCase = self.speech_processor.tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , normalize=_snake_case )[ 0 ] if isinstance(_snake_case , _snake_case ): UpperCAmelCase = 1 elif isinstance(_snake_case , _snake_case ): UpperCAmelCase = len(_snake_case ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_snake_case )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_snake_case , _snake_case ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(_snake_case )}.""" ) # get prompt text 
embeddings UpperCAmelCase = self.tokenizer( _snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) UpperCAmelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = text_embeddings.shape UpperCAmelCase = text_embeddings.repeat(1 , _snake_case , 1 ) UpperCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , _snake_case , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCAmelCase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase = 42 if negative_prompt is None: UpperCAmelCase = [''''''] * batch_size elif type(_snake_case ) is not type(_snake_case ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(_snake_case )} !=""" f""" {type(_snake_case )}.""" ) elif isinstance(_snake_case , _snake_case ): UpperCAmelCase = [negative_prompt] elif batch_size != len(_snake_case ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(_snake_case )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ''' the batch size of `prompt`.''' ) else: UpperCAmelCase = negative_prompt UpperCAmelCase = text_input_ids.shape[-1] UpperCAmelCase = self.tokenizer( _snake_case , padding='''max_length''' , max_length=_snake_case , truncation=_snake_case , return_tensors='''pt''' , ) UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase = uncond_embeddings.shape[1] UpperCAmelCase = uncond_embeddings.repeat(1 , _snake_case , 1 ) UpperCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , _snake_case , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase = torch.randn(_snake_case , generator=_snake_case , device='''cpu''' , dtype=_snake_case ).to( self.device ) else: UpperCAmelCase = torch.randn(_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) UpperCAmelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(_snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase = {} if accepts_eta: UpperCAmelCase = eta for i, t in enumerate(self.progress_bar(_snake_case ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase = self.scheduler.scale_model_input(_snake_case , _snake_case ) # predict the noise residual UpperCAmelCase = self.unet(_snake_case , _snake_case , encoder_hidden_states=_snake_case ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 ) UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_snake_case , _snake_case , _snake_case ) UpperCAmelCase = 1 / 0.1_8215 * latents UpperCAmelCase = self.vae.decode(_snake_case ).sample UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase = self.numpy_to_pil(_snake_case ) if not return_dict: return image return StableDiffusionPipelineOutput(images=_snake_case , nsfw_content_detected=_snake_case )
254
0
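# The pipeline above implements classifier-free guidance by running the U-Net on
# a [uncond, text] batch and blending the two noise predictions. The core update
# in isolation (random tensors stand in for real U-Net outputs):
import torch

guidance_scale = 7.5
noise_pred = torch.randn(2, 4, 8, 8)  # unconditional and text-conditioned halves
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 8, 8)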
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
714
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A = 16 A = 32 def __UpperCAmelCase ( __A , __A = 1_6 ) -> Tuple: '''simple docstring''' UpperCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-cased" ) UpperCAmelCase__ = load_dataset("glue" , "mrpc" ) def tokenize_function(__A ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__A , max_length=__A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase__ = datasets.map( __A , batched=__A , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase__ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__A ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase__ = 1_6 elif accelerator.mixed_precision != "no": UpperCAmelCase__ = 8 else: UpperCAmelCase__ = None return tokenizer.pad( __A , padding="longest" , max_length=__A , pad_to_multiple_of=__A , return_tensors="pt" , ) # Instantiate dataloaders. UpperCAmelCase__ = DataLoader( tokenized_datasets["train"] , shuffle=__A , collate_fn=__A , batch_size=__A ) UpperCAmelCase__ = DataLoader( tokenized_datasets["validation"] , shuffle=__A , collate_fn=__A , batch_size=__A ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders A = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( __A , __A ) -> Dict: '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , __A ) == "1": UpperCAmelCase__ = 2 # New Code # UpperCAmelCase__ = int(args.gradient_accumulation_steps ) # Initialize accelerator UpperCAmelCase__ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__A ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. 
Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase__ = config["lr"] UpperCAmelCase__ = int(config["num_epochs"] ) UpperCAmelCase__ = int(config["seed"] ) UpperCAmelCase__ = int(config["batch_size"] ) UpperCAmelCase__ = evaluate.load("glue" , "mrpc" ) set_seed(__A ) UpperCAmelCase__ , UpperCAmelCase__ = get_dataloaders(__A , __A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase__ = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase__ = AdamW(params=model.parameters() , lr=__A ) # Instantiate scheduler UpperCAmelCase__ = get_linear_schedule_with_warmup( optimizer=__A , num_warmup_steps=1_0_0 , num_training_steps=(len(__A ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = accelerator.prepare( __A , __A , __A , __A , __A ) # Now we train the model for epoch in range(__A ): model.train() for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__A ): UpperCAmelCase__ = model(**__A ) UpperCAmelCase__ = output.loss accelerator.backward(__A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ = model(**__A ) UpperCAmelCase__ = outputs.logits.argmax(dim=-1 ) UpperCAmelCase__ , UpperCAmelCase__ = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=__A , references=__A , ) UpperCAmelCase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __A ) def __UpperCAmelCase ( ) -> Dict: '''simple docstring''' UpperCAmelCase__ = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=__A , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." 
) UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6} training_function(__A , __A ) if __name__ == "__main__": main()
277
0
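# The training script above relies on `Accelerator.accumulate` to defer
# optimizer steps until `gradient_accumulation_steps` micro-batches have been
# seen. A stripped-down but runnable sketch of that loop shape on toy data:
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
loader = DataLoader(TensorDataset(torch.randn(32, 8), torch.randint(0, 2, (32,))), batch_size=4)

model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
loss_fn = torch.nn.CrossEntropyLoss()

for x, y in loader:
    with accelerator.accumulate(model):  # gradients are applied every 4th micro-batch
        loss = loss_fn(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()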
# Functions to print the upper and lower halves of a diamond (pyramid)
def floyd(n: int) -> None:
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |-  |_  |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
494
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record __UpperCamelCase : Any = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n' __UpperCamelCase : Tuple = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n' __UpperCamelCase : List[Any] = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = 
super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n' def A ( _lowercase , _lowercase ): return float((preds == labels).mean() ) def A ( _lowercase , _lowercase , _lowercase="binary" ): SCREAMING_SNAKE_CASE : Optional[Any] = simple_accuracy(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = float(fa_score(y_true=_lowercase , y_pred=_lowercase , average=_lowercase ) ) return { "accuracy": acc, "f1": fa, } def A ( _lowercase , _lowercase ): SCREAMING_SNAKE_CASE : List[Any] = {} for id_pred, label in zip(_lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" SCREAMING_SNAKE_CASE : List[str] = id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: SCREAMING_SNAKE_CASE : int = [(pred, label)] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = [], [] for question, preds_labels in question_map.items(): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = zip(*_lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = fa_score(y_true=_lowercase , y_pred=_lowercase , average='''macro''' ) fas.append(_lowercase ) SCREAMING_SNAKE_CASE : str = int(sum(pred == label for pred, label in preds_labels ) == len(_lowercase ) ) ems.append(_lowercase ) SCREAMING_SNAKE_CASE : List[str] = float(sum(_lowercase ) / len(_lowercase ) ) SCREAMING_SNAKE_CASE : Tuple = sum(_lowercase ) / len(_lowercase ) SCREAMING_SNAKE_CASE : int = float(fa_score(y_true=_lowercase , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowercase__ ( datasets.Metric): def __A ( self : Optional[Any] ): '''simple docstring''' if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , ) def __A ( self : Union[str, Any] ): '''simple docstring''' if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "prediction_text": datasets.Value('''string''' ), }, "references": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "answers": datasets.Sequence(datasets.Value('''string''' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('''int64''' ), "paragraph": datasets.Value('''int64''' ), "question": datasets.Value('''int64''' ), }, "prediction": datasets.Value('''int64''' ), }, "references": datasets.Value('''int64''' ), } else: return { 
"predictions": datasets.Value('''int64''' ), "references": datasets.Value('''int64''' ), } def __A ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ): '''simple docstring''' if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(UpperCamelCase__ , UpperCamelCase__ )} elif self.config_name == "cb": return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ , fa_avg='''macro''' ) elif self.config_name == "record": SCREAMING_SNAKE_CASE : List[str] = [ { '''qas''': [ {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]} for ref in references ] } ] SCREAMING_SNAKE_CASE : int = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions} return evaluate_record(UpperCamelCase__ , UpperCamelCase__ )[0] elif self.config_name == "multirc": return evaluate_multirc(UpperCamelCase__ , UpperCamelCase__ ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
248
0
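The MultiRC branch of the metric above is its least obvious part: predictions are grouped per (paragraph, question) pair before any averaging, so f1_m is a mean of per-question macro-F1 scores while exact match requires every answer in a group to be correct. A minimal, deobfuscated sketch of that logic with illustrative names (the real module keeps it inside the metric class):

from sklearn.metrics import f1_score

def multirc_scores(id_preds, labels):
    # Bucket (prediction, label) pairs by their paragraph/question indices.
    question_map = {}
    for id_pred, label in zip(id_preds, labels):
        qid = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        question_map.setdefault(qid, []).append((id_pred["prediction"], label))

    f1s, ems = [], []
    for preds_labels in question_map.values():
        preds, golds = zip(*preds_labels)
        f1s.append(f1_score(y_true=golds, y_pred=preds, average="macro"))
        ems.append(int(all(p == g for p, g in preds_labels)))  # every answer right?

    em = float(sum(ems) / len(ems))
    f1_m = float(sum(f1s) / len(f1s))
    f1_a = float(f1_score(y_true=labels, y_pred=[p["prediction"] for p in id_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}

print(multirc_scores(
    [{"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
     {"idx": {"paragraph": 0, "question": 0}, "prediction": 1}],
    [0, 1],
))  # {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}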
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER', 'False' ) ) is not True, reason='Skipping test because should only be run when releasing minor transformers version', ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class _SCREAMING_SNAKE_CASE (unittest.TestCase ): def lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=__UpperCamelCase , ) assert hasattr(self , '''env''' ) def lowerCAmelCase ( self : Any , __UpperCamelCase : str ) -> Optional[int]: """simple docstring""" snake_case__ : Dict = F'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}''' # distributed data settings snake_case__ : Union[str, Any] = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__UpperCamelCase , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__UpperCamelCase , py_version='''py36''' , ) def lowerCAmelCase ( self : List[Any] , __UpperCamelCase : List[Any] ) -> List[Any]: """simple docstring""" TrainingJobAnalytics(__UpperCamelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(2,)] ) def lowerCAmelCase ( self : Tuple , __UpperCamelCase : List[Any] ) -> List[str]: """simple docstring""" snake_case__ : str = self.create_estimator(__UpperCamelCase ) # run training estimator.fit() # result dataframe snake_case__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis snake_case__ : Tuple = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) snake_case__ : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping snake_case__ : Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 ) ) # assert kpis assert 
train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __UpperCamelCase )
574
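The SageMaker test above is fanned out over a framework/script matrix by @parameterized_class, and only the non-DDP script receives the smdistributed data-parallel block. A runnable toy sketch of the same pattern, assuming the parameterized package is installed; the attribute values here are illustrative:

import unittest
from parameterized import parameterized_class

@parameterized_class([
    {"framework": "pytorch", "script": "run_glue.py"},
    {"framework": "pytorch", "script": "run_ddp.py"},
])
class EstimatorMatrixTest(unittest.TestCase):
    def test_distribution_config(self):
        # Mirrors the conditional in create_estimator above: run_ddp.py sets up
        # distribution itself, everything else opts into smdistributed.
        distribution = (
            {"smdistributed": {"dataparallel": {"enabled": True}}}
            if self.script != "run_ddp.py"
            else None
        )
        self.assertEqual(distribution is None, self.script == "run_ddp.py")

if __name__ == "__main__":
    unittest.main()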
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") _lowercase : Any =logging.getLogger(__name__) @dataclass class _SCREAMING_SNAKE_CASE : A__ = field( default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) A__ = field( default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, ) A__ = field( default=1024, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) }, ) A__ = field( default=lowercase__, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) A__ = field( default=lowercase__, metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) }, ) A__ = field( default=lowercase__, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) }, ) A__ = field( default=lowercase__, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) }, ) A__ = field( default=lowercase__, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) }, ) A__ = field( default=lowercase__, metadata={'help': 'A csv or a json file containing the training data.'} ) A__ = field( default=lowercase__, metadata={'help': 'A csv or a json file containing the validation data.'} ) A__ = field(default=lowercase__, metadata={'help': 'A csv or a json file containing the test data.'} ) def lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' ) else: snake_case__ : int = self.train_file.split('''.''' )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." snake_case__ : str = self.validation_file.split('''.''' )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _SCREAMING_SNAKE_CASE : A__ = field( default=lowercase__, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) A__ = field( default=lowercase__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A__ = field( default=lowercase__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) A__ = field( default=lowercase__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) A__ = field( default=lowercase__, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, ) A__ = field( default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, ) A__ = field( default=lowercase__, metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) }, ) def __UpperCAmelCase ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case__ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case__ , snake_case__ , snake_case__ : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case__ , snake_case__ , snake_case__ : Optional[Any] = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) snake_case__ : Optional[int] = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. snake_case__ : Optional[int] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: snake_case__ : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. snake_case__ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. snake_case__ : Optional[int] = {'''train''': data_args.train_file, '''validation''': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: snake_case__ : int = data_args.train_file.split('''.''' )[-1] snake_case__ : str = data_args.test_file.split('''.''' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." snake_case__ : List[str] = data_args.test_file else: raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' ) for key in data_files.keys(): logger.info(F'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith('''.csv''' ): # Loading a dataset from local csv files snake_case__ : Any = load_dataset('''csv''' , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files snake_case__ : Union[str, Any] = load_dataset('''json''' , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels snake_case__ : List[Any] = raw_datasets['''train'''].features['''label'''].names snake_case__ : Optional[Any] = len(UpperCamelCase__ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case__ : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer snake_case__ : Optional[Any] = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=UpperCamelCase__ , ) snake_case__ : Tuple = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: snake_case__ : List[str] = '''max_length''' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch snake_case__ : str = False # Some models have set the order of the labels to use, so let's make sure we do use it. snake_case__ : List[Any] = {'''Refused''': 0, '''Entailed''': 1} snake_case__ : Optional[Any] = {0: '''Refused''', 1: '''Entailed'''} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) snake_case__ : Dict = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(UpperCamelCase__ :Tuple ): # Tokenize the texts def _convert_table_text_to_pandas(UpperCamelCase__ :List[Any] ): snake_case__ : str = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )] snake_case__ : List[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd snake_case__ : Optional[Any] = examples['''statement'''] snake_case__ : str = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) ) snake_case__ : Tuple = tokenizer(UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ ) snake_case__ : List[str] = examples['''label'''] return result with training_args.main_process_first(desc='''dataset map pre-processing''' ): snake_case__ : str = raw_datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) snake_case__ : Optional[int] = raw_datasets['''train'''] if data_args.max_train_samples is not None: snake_case__ : List[Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) snake_case__ : int = raw_datasets['''validation'''] if data_args.max_eval_samples is not None: snake_case__ : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: 
if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('''--do_predict requires a test dataset''' ) snake_case__ : Union[str, Any] = raw_datasets['''test'''] if data_args.max_predict_samples is not None: snake_case__ : str = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(UpperCamelCase__ ) ) , 3 ): logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(UpperCamelCase__ :EvalPrediction ): snake_case__ : Optional[Any] = p.predictions[0] if isinstance(p.predictions , UpperCamelCase__ ) else p.predictions snake_case__ : Dict = np.argmax(UpperCamelCase__ , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: snake_case__ : str = default_data_collator elif training_args.fpaa: snake_case__ : List[str] = DataCollatorWithPadding(UpperCamelCase__ , pad_to_multiple_of=8 ) else: snake_case__ : str = None # Initialize our Trainer snake_case__ : Optional[Any] = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCamelCase__ , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , ) # Training if training_args.do_train: snake_case__ : Optional[Any] = None if training_args.resume_from_checkpoint is not None: snake_case__ : Union[str, Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: snake_case__ : List[Any] = last_checkpoint snake_case__ : Union[str, Any] = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) snake_case__ : Dict = train_result.metrics snake_case__ : Tuple = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) snake_case__ : Any = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('''train''' , UpperCamelCase__ ) trainer.save_metrics('''train''' , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) snake_case__ : Union[str, Any] = trainer.evaluate(eval_dataset=UpperCamelCase__ ) snake_case__ : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) snake_case__ : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics('''eval''' , UpperCamelCase__ ) trainer.save_metrics('''eval''' , UpperCamelCase__ ) if training_args.do_predict: logger.info('''*** Predict ***''' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
snake_case__ : Any = predict_dataset.remove_columns('''label''' ) snake_case__ : Union[str, Any] = trainer.predict(UpperCamelCase__ , metric_key_prefix='''predict''' ).predictions snake_case__ : Optional[Any] = np.argmax(UpperCamelCase__ , axis=1 ) snake_case__ : str = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' ) if trainer.is_world_process_zero(): with open(UpperCamelCase__ , '''w''' ) as writer: logger.info('''***** Predict Results *****''' ) writer.write('''index\tprediction\n''' ) for index, item in enumerate(UpperCamelCase__ ): snake_case__ : Tuple = label_list[item] writer.write(F'''{index}\t{item}\n''' ) snake_case__ : List[str] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''} if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) def __UpperCAmelCase ( UpperCamelCase__ :List[Any] ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
574
1
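The key preprocessing step in the TAPEX script above is _convert_table_text_to_pandas, which rebuilds a DataFrame from TabFact's '#'-delimited table text, promoting the first row to the header. A self-contained sketch with a made-up table:

import pandas as pd

def convert_table_text_to_pandas(table_text: str) -> pd.DataFrame:
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

table = "player#team#goals\nkane#bayern#20\nmessi#inter miami#12"
print(convert_table_text_to_pandas(table))
# prints a 2-row DataFrame with columns player / team / goals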
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _A = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" _snake_case : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _snake_case : Optional[int] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _snake_case : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _snake_case : Optional[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def A ( self : Tuple , A_ : Any , A_ : Optional[Any] , A_ : Any )-> Union[str, Any]: __UpperCamelCase = ZeroShotClassificationPipeline( model=A_ , tokenizer=A_ , candidate_labels=["polics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def A ( self : Union[str, Any] , A_ : Optional[int] , A_ : List[Any] )-> List[Any]: __UpperCamelCase = classifier("Who are you voting for in 2020?" , candidate_labels="politics" ) self.assertEqual(A_ , {"sequence": ANY(A_ ), "labels": [ANY(A_ )], "scores": [ANY(A_ )]} ) # No kwarg __UpperCamelCase = classifier("Who are you voting for in 2020?" , ["politics"] ) self.assertEqual(A_ , {"sequence": ANY(A_ ), "labels": [ANY(A_ )], "scores": [ANY(A_ )]} ) __UpperCamelCase = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] ) self.assertEqual(A_ , {"sequence": ANY(A_ ), "labels": [ANY(A_ )], "scores": [ANY(A_ )]} ) __UpperCamelCase = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" ) self.assertEqual( A_ , {"sequence": ANY(A_ ), "labels": [ANY(A_ ), ANY(A_ )], "scores": [ANY(A_ ), ANY(A_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) __UpperCamelCase = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] ) self.assertEqual( A_ , {"sequence": ANY(A_ ), "labels": [ANY(A_ ), ANY(A_ )], "scores": [ANY(A_ ), ANY(A_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) __UpperCamelCase = classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" ) self.assertEqual(A_ , {"sequence": ANY(A_ ), "labels": [ANY(A_ )], "scores": [ANY(A_ )]} ) # https://github.com/huggingface/transformers/issues/13846 __UpperCamelCase = classifier(["I am happy"] , ["positive", "negative"] ) self.assertEqual( A_ , [ {"sequence": ANY(A_ ), "labels": [ANY(A_ ), ANY(A_ )], "scores": [ANY(A_ ), ANY(A_ )]} for i in range(1 ) ] , ) __UpperCamelCase = classifier(["I am happy", "I am sad"] , ["positive", "negative"] ) self.assertEqual( A_ , [ {"sequence": ANY(A_ ), "labels": [ANY(A_ ), ANY(A_ )], "scores": [ANY(A_ ), ANY(A_ )]} for i in range(2 ) ] , ) with self.assertRaises(A_ ): classifier("" , candidate_labels="politics" ) with self.assertRaises(A_ ): classifier(A_ , candidate_labels="politics" ) with self.assertRaises(A_ ): classifier("Who are you voting for in 2020?" 
, candidate_labels="" ) with self.assertRaises(A_ ): classifier("Who are you voting for in 2020?" , candidate_labels=A_ ) with self.assertRaises(A_ ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , ) with self.assertRaises(A_ ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=A_ , ) self.run_entailment_id(A_ ) def A ( self : Tuple , A_ : Pipeline )-> str: __UpperCamelCase = zero_shot_classifier.model.config __UpperCamelCase = config.labelaid __UpperCamelCase = zero_shot_classifier.entailment_id __UpperCamelCase = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) __UpperCamelCase = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) __UpperCamelCase = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) __UpperCamelCase = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) __UpperCamelCase = original_labelaid self.assertEqual(A_ , zero_shot_classifier.entailment_id ) @require_torch def A ( self : int )-> List[Any]: __UpperCamelCase = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" * 1_00 , candidate_labels=["politics", "public health", "science"] ) @require_torch def A ( self : Tuple )-> List[Any]: __UpperCamelCase = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) __UpperCamelCase = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(A_ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], } , ) @require_tf def A ( self : int )-> Dict: __UpperCamelCase = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , ) __UpperCamelCase = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(A_ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], } , ) @slow @require_torch def A ( self : Any )-> str: __UpperCamelCase = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" ) __UpperCamelCase = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(A_ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], } , ) __UpperCamelCase = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. 
We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=A_ , ) self.assertEqual( nested_simplify(A_ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], } , ) @slow @require_tf def A ( self : List[Any] )-> Any: __UpperCamelCase = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" ) __UpperCamelCase = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(A_ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], } , ) __UpperCamelCase = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. 
Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=A_ , ) self.assertEqual( nested_simplify(A_ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], } , )
505
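The run_entailment_id test above pins down how the pipeline decides which NLI label index means "entailment". A pure-Python sketch of the rule the assertions imply (first label whose lowercase name starts with "entail", else -1); the actual property lives on ZeroShotClassificationPipeline:

def entailment_id(label2id: dict) -> int:
    for label, idx in label2id.items():
        if label.lower().startswith("entail"):
            return idx
    return -1

assert entailment_id({"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}) == -1
assert entailment_id({"entailment": 0, "neutral": 1, "contradiction": 2}) == 0
assert entailment_id({"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}) == 2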
"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer _A = logging.getLogger(__name__) def lowercase () -> List[str]: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" ,type=_snake_case ,default="wikitext" ,help="Name of the training. Explore datasets at: hf.co/datasets." ,) parser.add_argument( "--dataset_config" ,type=_snake_case ,default="wikitext-103-raw-v1" ,help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" ,type=_snake_case ,default="sayakpaul/unigram-tokenizer-wikitext" ,help="Tokenizer identifier. Can be a local filepath or a Hub identifier." ,) parser.add_argument( "--shard_size" ,type=_snake_case ,default=1000 ,help="Number of entries to go in a single shard." ,) parser.add_argument("--split" ,type=_snake_case ,default="train" ,choices=["train", "test", "validation"] ) parser.add_argument( "--limit" ,default=_snake_case ,type=_snake_case ,help="Limit the number of shards (used for debugging)." ,) parser.add_argument( "--max_length" ,type=_snake_case ,default=512 ,help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." ,) parser.add_argument( "--output_dir" ,default="tf-tpu" ,type=_snake_case ,help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." ,) __UpperCamelCase = parser.parse_args() return args def lowercase (_snake_case ) -> List[Any]: '''simple docstring''' def fn(_snake_case ): return tokenizer(examples["text"] ) return fn def lowercase (_snake_case ) -> Union[str, Any]: '''simple docstring''' __UpperCamelCase = [] for i in range(len(tokenized_data["input_ids"] ) ): __UpperCamelCase = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } __UpperCamelCase = tf.train.Features(feature=_snake_case ) __UpperCamelCase = tf.train.Example(features=_snake_case ) __UpperCamelCase = example.SerializeToString() records.append(_snake_case ) return records def lowercase (_snake_case ) -> Dict: '''simple docstring''' __UpperCamelCase = datasets.load_dataset(args.dataset_name ,args.dataset_config ,split=args.split ) if args.limit is not None: __UpperCamelCase = min(len(_snake_case ) ,args.limit ) __UpperCamelCase = dataset.select(range(_snake_case ) ) print(f"""Limiting the dataset to {args.limit} entries.""" ) __UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) __UpperCamelCase = os.path.join(args.output_dir ,args.split ) if not os.path.exists(_snake_case ): os.makedirs(_snake_case ) else: __UpperCamelCase = os.path.join(args.output_dir ,args.split ) # Tokenize the whole dataset at once. 
__UpperCamelCase = tokenize_function(_snake_case ) __UpperCamelCase = dataset.map(_snake_case ,batched=_snake_case ,num_proc=4 ,remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(_snake_case ): # Concatenate all texts. __UpperCamelCase = {k: sum(examples[k] ,[] ) for k in examples.keys()} __UpperCamelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 __UpperCamelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. __UpperCamelCase = { k: [t[i : i + args.max_length] for i in range(0 ,_snake_case ,args.max_length )] for k, t in concatenated_examples.items() } return result __UpperCamelCase = dataset_tokenized.map(_snake_case ,batched=_snake_case ,batch_size=1000 ,num_proc=4 ) __UpperCamelCase = 0 __UpperCamelCase = 0 for shard in range(0 ,len(_snake_case ) ,args.shard_size ): __UpperCamelCase = grouped_dataset[shard : shard + args.shard_size] __UpperCamelCase = len(dataset_snapshot["input_ids"] ) __UpperCamelCase = os.path.join(_snake_case ,f"""dataset-{shard_count}-{records_containing}.tfrecord""" ) __UpperCamelCase = get_serialized_examples(_snake_case ) with tf.io.TFRecordWriter(_snake_case ) as out_file: for i in range(len(_snake_case ) ): __UpperCamelCase = serialized_examples[i] out_file.write(_snake_case ) print("Wrote file {} containing {} records".format(_snake_case ,_snake_case ) ) shard_count += 1 total_records += records_containing with open(f"""split-{args.split}-records-count.txt""" ,"w" ) as f: print(f"""Total {args.split} records: {total_records}""" ,file=_snake_case ) if __name__ == "__main__": _A = parse_args() main(args)
505
1
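The group_texts closure above is what turns variable-length tokenized documents into fixed-length TFRecord samples: concatenate everything, drop the remainder, split into max_length blocks. A dependency-free sketch with a tiny block size so the chunking is visible:

def group_texts(examples: dict, max_length: int = 4) -> dict:
    concatenated = {k: sum(examples[k], []) for k in examples}
    total_length = len(next(iter(concatenated.values())))
    total_length = (total_length // max_length) * max_length  # drop the remainder
    return {
        k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
        for k, t in concatenated.items()
    }

batch = {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8, 9]]}
print(group_texts(batch))  # {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]}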
from __future__ import annotations

solution: list[list[list[int]]] = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # A square is safe if no queen shares its row, column, or either upper diagonal.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append([line[:] for line in board])  # store a copy, not the mutable board
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1  # place a queen
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            print("Q" if board[i][j] == 1 else ".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
711
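For reference, a compact rewrite of the same backtracking search that counts solutions instead of printing boards; the helper names here are mine, not the module's:

def n_queens(n: int) -> int:
    count = 0
    cols: list[int] = []  # cols[r] is the column of the queen placed in row r

    def safe(row: int, col: int) -> bool:
        return all(c != col and abs(c - col) != row - r for r, c in enumerate(cols))

    def place(row: int) -> None:
        nonlocal count
        if row == n:
            count += 1
            return
        for col in range(n):
            if safe(row, col):
                cols.append(col)
                place(row + 1)
                cols.pop()  # backtrack

    place(0)
    return count

print(n_queens(4), n_queens(8))  # 2 92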
def solution() -> int:
    """Project Euler 40: product of the digits d_1, d_10, ..., d_1000000 of
    Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:  # 1e6 numbers yield comfortably more than 1e6 digits
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[9_9999])
        * int(constant[99_9999])
    )


if __name__ == "__main__":
    print(solution())
678
0
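Building a million-character string works, but the n-th digit of Champernowne's constant can also be found directly by skipping whole blocks of d-digit numbers. A sketch of that alternative (my own helper, not part of the solution above):

from math import prod

def champernowne_digit(n: int) -> int:
    digits, count, start = 1, 9, 1
    while n > digits * count:  # skip the 9 * 10**(d-1) numbers with d digits
        n -= digits * count
        digits, count, start = digits + 1, count * 10, start * 10
    number = start + (n - 1) // digits
    return int(str(number)[(n - 1) % digits])

print(prod(champernowne_digit(10 ** k) for k in range(7)))  # 210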
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class UpperCAmelCase ( unittest.TestCase ): def __UpperCAmelCase ( self : List[str] ): """simple docstring""" _snake_case = tempfile.mkdtemp() # fmt: off _snake_case = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on _snake_case = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) _snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] _snake_case = {'''unk_token''': '''<unk>'''} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowerCamelCase ) ) _snake_case = { '''do_resize''': True, '''size''': 2_0, '''do_center_crop''': True, '''crop_size''': 1_8, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } _snake_case = os.path.join(self.tmpdirname , __lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : int , **__lowerCamelCase : List[str] ): """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , **__lowerCamelCase : List[str] ): """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowerCamelCase ) def __UpperCAmelCase ( self : Tuple , **__lowerCamelCase : Dict ): """simple docstring""" return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _snake_case = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() _snake_case = self.get_image_processor() _snake_case = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) _snake_case = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase ) _snake_case = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) _snake_case 
= OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" _snake_case = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _snake_case = self.get_image_processor(do_normalize=__lowerCamelCase ) _snake_case = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) _snake_case = self.prepare_image_inputs() _snake_case = image_processor(__lowerCamelCase , return_tensors='''np''' ) _snake_case = processor(images=__lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = processor(text=__lowerCamelCase , return_tensors='''np''' ) _snake_case = tokenizer(__lowerCamelCase , return_tensors='''np''' ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = self.prepare_image_inputs() _snake_case = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = '''google/owlvit-base-patch32''' _snake_case = OwlViTProcessor.from_pretrained(__lowerCamelCase ) _snake_case = ['''cat''', '''nasa badge'''] _snake_case = 
processor(text=__lowerCamelCase ) _snake_case = 1_6 self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" _snake_case = '''google/owlvit-base-patch32''' _snake_case = OwlViTProcessor.from_pretrained(__lowerCamelCase ) _snake_case = [['''cat''', '''nasa badge'''], ['''person''']] _snake_case = processor(text=__lowerCamelCase ) _snake_case = 1_6 _snake_case = len(__lowerCamelCase ) _snake_case = max([len(__lowerCamelCase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = '''google/owlvit-base-patch32''' _snake_case = OwlViTProcessor.from_pretrained(__lowerCamelCase ) _snake_case = ['''cat''', '''nasa badge'''] _snake_case = processor(text=__lowerCamelCase ) _snake_case = 1_6 _snake_case = inputs['''input_ids'''] _snake_case = [ [4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) _snake_case = self.prepare_image_inputs() _snake_case = self.prepare_image_inputs() _snake_case = processor(images=__lowerCamelCase , query_images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def __UpperCAmelCase ( self : str ): """simple docstring""" _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) _snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _snake_case = processor.batch_decode(__lowerCamelCase ) _snake_case = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
103
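The shape assertions above rely on the processor padding nested per-image query lists out to the longest list before tokenizing, giving batch_size * num_max_text_queries rows. A pure-Python sketch of just that flattening step; the pad value and helper are illustrative, not the processor's actual code:

def flatten_queries(input_texts: list[list[str]], pad: str = "") -> list[str]:
    num_max = max(len(texts) for texts in input_texts)
    return [
        texts[i] if i < len(texts) else pad
        for texts in input_texts
        for i in range(num_max)
    ]

queries = [["cat", "nasa badge"], ["person"]]
print(flatten_queries(queries))  # ['cat', 'nasa badge', 'person', '']
# 2 images * 2 max queries = 4 rows, matching the (batch * num_max, seq_len) shape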
"""simple docstring""" from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging SCREAMING_SNAKE_CASE__ : List[str] =logging.get_logger(__name__) class _UpperCAmelCase ( a_ ): """simple docstring""" __snake_case = ["""pixel_values"""] def __init__( self , _lowercase = True , _lowercase = None , _lowercase = PILImageResampling.BICUBIC , _lowercase = True , _lowercase = None , _lowercase = True , _lowercase = 1 / 255 , _lowercase = True , _lowercase = IMAGENET_DEFAULT_MEAN , _lowercase = IMAGENET_DEFAULT_STD , **_lowercase , ) -> None: super().__init__(**_lowercase ) _lowerCamelCase : Dict = size if size is not None else {'''shortest_edge''': 224} _lowerCamelCase : str = get_size_dict(_lowercase , default_to_square=_lowercase ) _lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _lowerCamelCase : List[str] = get_size_dict(_lowercase , param_name='''crop_size''' ) _lowerCamelCase : int = do_resize _lowerCamelCase : List[str] = size _lowerCamelCase : str = resample _lowerCamelCase : Union[str, Any] = do_center_crop _lowerCamelCase : str = crop_size _lowerCamelCase : Dict = do_rescale _lowerCamelCase : Optional[Any] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowerCamelCase : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a__ ( self , _lowercase , _lowercase , _lowercase = PILImageResampling.BICUBIC , _lowercase = None , **_lowercase , ) -> np.ndarray: _lowerCamelCase : Dict = get_size_dict(_lowercase , default_to_square=_lowercase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _lowerCamelCase : Tuple = int((256 / 224) * size['''shortest_edge'''] ) _lowerCamelCase : Optional[Any] = get_resize_output_image_size(_lowercase , size=_lowercase , default_to_square=_lowercase ) _lowerCamelCase : Dict = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _lowercase , size=(size_dict['''height'''], size_dict['''width''']) , resample=_lowercase , data_format=_lowercase , **_lowercase ) def a__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray: _lowerCamelCase : int = get_size_dict(_lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowercase , size=(size['''height'''], size['''width''']) , data_format=_lowercase , **_lowercase ) def a__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray: return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase ) def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray: return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase ) def a__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> BatchFeature: _lowerCamelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : Union[str, Any] = resample if resample is not None else self.resample _lowerCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Optional[int] = image_std if image_std is not None else self.image_std _lowerCamelCase : Tuple = size if size is not None else self.size _lowerCamelCase : Tuple = get_size_dict(_lowercase , default_to_square=_lowercase ) _lowerCamelCase : Tuple = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : List[str] = get_size_dict(_lowercase , param_name='''crop_size''' ) _lowerCamelCase : List[Any] = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _lowerCamelCase : List[str] = [to_numpy_array(_lowercase ) for image in images] if do_resize: _lowerCamelCase : List[str] = [self.resize(_lowercase , _lowercase , _lowercase ) for image in images] if do_center_crop: _lowerCamelCase : Union[str, Any] = [self.center_crop(_lowercase , _lowercase ) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(_lowercase , _lowercase ) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(_lowercase , _lowercase , _lowercase ) for image in images] _lowerCamelCase : Optional[int] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images] _lowerCamelCase : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=_lowercase , tensor_type=_lowercase )
434
0
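The resize step in the image-processor cell above hides a small piece of arithmetic worth making explicit: when `size` carries only `shortest_edge`, the processor first scales that edge by 256/224 and then resizes so the shorter image side hits the scaled target, leaving the center crop to bring the result back to `crop_size`. Below is a minimal sketch of that sizing computation only; the function name and standalone form are mine, not the library's.

def shortest_edge_output_size(height: int, width: int, shortest_edge: int) -> tuple:
    # Scale the requested edge the same way the processor above does.
    target = int((256 / 224) * shortest_edge)
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * target / short)  # preserve aspect ratio
    return (target, new_long) if height <= width else (new_long, target)

print(shortest_edge_output_size(480, 640, 224))  # (256, 341): shorter side -> 256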
from __future__ import annotations from random import random class lowerCAmelCase_ : '''simple docstring''' def __init__( self , __UpperCAmelCase = None ): SCREAMING_SNAKE_CASE_ : Dict =value SCREAMING_SNAKE_CASE_ : Optional[Any] =random() SCREAMING_SNAKE_CASE_ : Node | None =None SCREAMING_SNAKE_CASE_ : Node | None =None def __repr__( self ): from pprint import pformat if self.left is None and self.right is None: return F"""'{self.value}: {self.prior:.5}'""" else: return pformat( {F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 ) def __str__( self ): SCREAMING_SNAKE_CASE_ : Dict =str(self.value ) + ' ' SCREAMING_SNAKE_CASE_ : List[str] =str(self.left or '' ) SCREAMING_SNAKE_CASE_ : int =str(self.right or '' ) return value + left + right def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Node | None ,lowerCAmelCase_ : int ) -> tuple[Node | None, Node | None]: """simple docstring""" if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: SCREAMING_SNAKE_CASE_ : Union[str, Any] =split(root.left ,lowerCAmelCase_ ) return left, root else: SCREAMING_SNAKE_CASE_ : List[str] =split(root.right ,lowerCAmelCase_ ) return root, right def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Node | None ,lowerCAmelCase_ : Node | None ) -> Node | None: """simple docstring""" if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: SCREAMING_SNAKE_CASE_ : List[str] =merge(left.right ,lowerCAmelCase_ ) return left else: SCREAMING_SNAKE_CASE_ : int =merge(lowerCAmelCase_ ,right.left ) return right def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Node | None ,lowerCAmelCase_ : int ) -> Node | None: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] =Node(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =split(lowerCAmelCase_ ,lowerCAmelCase_ ) return merge(merge(lowerCAmelCase_ ,lowerCAmelCase_ ) ,lowerCAmelCase_ ) def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Node | None ,lowerCAmelCase_ : int ) -> Node | None: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] =split(lowerCAmelCase_ ,value - 1 ) SCREAMING_SNAKE_CASE_ : List[Any] =split(lowerCAmelCase_ ,lowerCAmelCase_ ) return merge(lowerCAmelCase_ ,lowerCAmelCase_ ) def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Node | None ) -> None: """simple docstring""" if not root: # None return else: inorder(root.left ) print(root.value ,end=',' ) inorder(root.right ) def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Node | None ,lowerCAmelCase_ : str ) -> Node | None: """simple docstring""" for arg in args.split(): if arg[0] == "+": SCREAMING_SNAKE_CASE_ : List[Any] =insert(lowerCAmelCase_ ,int(arg[1:] ) ) elif arg[0] == "-": SCREAMING_SNAKE_CASE_ : Dict =erase(lowerCAmelCase_ ,int(arg[1:] ) ) else: print('Unknown command' ) return root def SCREAMING_SNAKE_CASE__ ( ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ : str =None print( 'enter numbers to create a tree, + value to add value into treap, ' '- value to erase all nodes with value. \'q\' to quit. ' ) SCREAMING_SNAKE_CASE_ : List[str] =input() while args != "q": SCREAMING_SNAKE_CASE_ : Optional[int] =interact_treap(lowerCAmelCase_ ,lowerCAmelCase_ ) print(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ : Any =input() print('good by!' ) if __name__ == "__main__": import doctest doctest.testmod() main()
700
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : list[int] ,lowerCAmelCase_ : list[int] ,lowerCAmelCase_ : int ) -> bool: """simple docstring""" return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(lowerCAmelCase_ ) ) def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : list[list[int]] ,lowerCAmelCase_ : int ,lowerCAmelCase_ : list[int] ,lowerCAmelCase_ : int ) -> bool: """simple docstring""" if index == len(lowerCAmelCase_ ): return True # Recursive Step for i in range(lowerCAmelCase_ ): if valid_coloring(graph[index] ,lowerCAmelCase_ ,lowerCAmelCase_ ): # Color current vertex SCREAMING_SNAKE_CASE_ : int =i # Validate coloring if util_color(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,index + 1 ): return True # Backtrack SCREAMING_SNAKE_CASE_ : Optional[int] =-1 return False def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : list[list[int]] ,lowerCAmelCase_ : int ) -> list[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] =[-1] * len(lowerCAmelCase_ ) if util_color(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,0 ): return colored_vertices return []
153
0
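In the treap cell above, the automated renaming collapsed the tuple unpacking that `split` and `merge` rely on (the single assignment before `return left, root` originally bound both `left` and `root.left`), so the code as printed raises NameError. A readable sketch of the intended `split`/`merge`/`insert` shape, under that reading:

from random import random

class Node:
    def __init__(self, value):
        self.value = value
        self.prior = random()          # heap priority
        self.left = self.right = None

def split(root, value):
    # -> (treap with keys <= value, treap with keys > value)
    if root is None:
        return None, None
    if value < root.value:
        left, root.left = split(root.left, value)
        return left, root
    root.right, right = split(root.right, value)
    return root, right

def merge(left, right):
    if not left or not right:          # one side empty
        return left or right
    if left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    right.left = merge(left, right.left)
    return right

def insert(root, value):
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)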
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=__magic_name__ ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} ) _snake_case : ClassVar[Features] = Features({'audio': Audio()} ) _snake_case : ClassVar[Features] = Features({'labels': ClassLabel} ) _snake_case : str = "audio" _snake_case : str = "labels" def snake_case__ ( self : Dict , lowerCAmelCase__ : List[str] ) -> Any: '''simple docstring''' if self.label_column not in features: raise ValueError(f"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , lowerCAmelCase__ ): raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" ) _UpperCamelCase = copy.deepcopy(self ) _UpperCamelCase = self.label_schema.copy() _UpperCamelCase = features[self.label_column] _UpperCamelCase = label_schema return task_template @property def snake_case__ ( self : int ) -> Dict[str, str]: '''simple docstring''' return { self.audio_column: "audio", self.label_column: "labels", }
98
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ : Dict = False class __lowerCAmelCase ( unittest.TestCase ): def snake_case_ (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def snake_case_ (self ): return 1_2 @property def snake_case_ (self ): return 1_2 @property def snake_case_ (self ): return 3_2 @property def snake_case_ (self ): torch.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def snake_case_ (self ): _UpperCAmelCase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def snake_case_ (self ): torch.manual_seed(0 ) _UpperCAmelCase : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(lowerCAmelCase__ ) @property def snake_case_ (self ): torch.manual_seed(0 ) _UpperCAmelCase : int = 1_2 _UpperCAmelCase : Tuple = 1_2 _UpperCAmelCase : Any = { """attention_bias""": True, """cross_attention_dim""": 3_2, """attention_head_dim""": height * width, """num_attention_heads""": 1, """num_vector_embeds""": self.num_embed, """num_embeds_ada_norm""": self.num_embeds_ada_norm, """norm_num_groups""": 3_2, """sample_size""": width, """activation_fn""": """geglu-approximate""", } _UpperCAmelCase : Tuple = TransformeraDModel(**lowerCAmelCase__ ) return model def snake_case_ (self ): _UpperCAmelCase : List[str] = """cpu""" _UpperCAmelCase : Any = self.dummy_vqvae _UpperCAmelCase : int = self.dummy_text_encoder _UpperCAmelCase : Tuple = self.dummy_tokenizer _UpperCAmelCase : List[str] = self.dummy_transformer _UpperCAmelCase : Tuple = VQDiffusionScheduler(self.num_embed ) _UpperCAmelCase : int = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = VQDiffusionPipeline( vqvae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , transformer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , ) _UpperCAmelCase : List[Any] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : int = """teddy bear playing in the pool""" _UpperCAmelCase : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) _UpperCAmelCase : List[Any] = pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""" ) _UpperCAmelCase : Union[str, Any] = output.images _UpperCAmelCase : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) _UpperCAmelCase : Optional[int] = pipe( [prompt] , generator=lowerCAmelCase__ , output_type="""np""" , 
return_dict=lowerCAmelCase__ , num_inference_steps=2 )[0] _UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1] _UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) _UpperCAmelCase : str = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ (self ): _UpperCAmelCase : Optional[Any] = """cpu""" _UpperCAmelCase : Tuple = self.dummy_vqvae _UpperCAmelCase : Dict = self.dummy_text_encoder _UpperCAmelCase : int = self.dummy_tokenizer _UpperCAmelCase : Any = self.dummy_transformer _UpperCAmelCase : List[str] = VQDiffusionScheduler(self.num_embed ) _UpperCAmelCase : str = LearnedClassifierFreeSamplingEmbeddings( learnable=lowerCAmelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) _UpperCAmelCase : Tuple = VQDiffusionPipeline( vqvae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , transformer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , ) _UpperCAmelCase : List[str] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = """teddy bear playing in the pool""" _UpperCAmelCase : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) _UpperCAmelCase : Dict = pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""" ) _UpperCAmelCase : Dict = output.images _UpperCAmelCase : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe( [prompt] , generator=lowerCAmelCase__ , output_type="""np""" , return_dict=lowerCAmelCase__ , num_inference_steps=2 )[0] _UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1] _UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) _UpperCAmelCase : List[str] = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def snake_case_ (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ (self ): _UpperCAmelCase : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" ) _UpperCAmelCase : List[Any] = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" ) _UpperCAmelCase : Union[str, Any] = pipeline.to(lowerCAmelCase__ ) pipeline.set_progress_bar_config(disable=lowerCAmelCase__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though _UpperCAmelCase : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipeline( """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=lowerCAmelCase__ , output_type="""np""" , ) _UpperCAmelCase : Dict = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - image ).max() < 2.0
414
0
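The task template in this row boils down to a column-validation-plus-mapping pattern. A simplified, dependency-free sketch of that pattern (plain dataclass, no `datasets` schema copy; all names here are illustrative):

from dataclasses import dataclass

@dataclass(frozen=True)
class AudioClassificationTemplate:
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features: dict) -> "AudioClassificationTemplate":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        return self

    @property
    def column_mapping(self) -> dict:
        return {self.audio_column: "audio", self.label_column: "labels"}

tpl = AudioClassificationTemplate().align_with_features({"audio": None, "labels": None})
print(tpl.column_mapping)  # {'audio': 'audio', 'labels': 'labels'}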
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _UpperCamelCase( SCREAMING_SNAKE_CASE ): __A: List[Any] = """char""" __A: List[str] = """bpe""" __A: Optional[int] = """wp""" __lowerCamelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _UpperCamelCase( SCREAMING_SNAKE_CASE ): __A: Tuple = ["""image_processor""", """char_tokenizer"""] __A: List[Any] = """ViTImageProcessor""" __A: str = """MgpstrTokenizer""" def __init__( self : Union[str, Any] , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[int]=None , **_lowerCamelCase : int ): _UpperCAmelCase : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _lowerCamelCase , ) _UpperCAmelCase : List[str] = kwargs.pop("feature_extractor" ) _UpperCAmelCase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _UpperCAmelCase : Any = tokenizer _UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("gpt2" ) _UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(_lowerCamelCase , _lowerCamelCase ) def __call__( self : str , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[Any]=None , **_lowerCamelCase : Optional[Any] ): if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _UpperCAmelCase : str = self.image_processor(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) if text is not None: _UpperCAmelCase : List[Any] = self.char_tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) if text is None: return inputs elif images is None: return encodings else: _UpperCAmelCase : Tuple = encodings["input_ids"] return inputs def a__ ( self : Optional[int] , _lowerCamelCase : List[str] ): _UpperCAmelCase : Any = sequences _UpperCAmelCase : str = char_preds.size(0 ) _UpperCAmelCase : Dict = self._decode_helper(_lowerCamelCase , "char" ) _UpperCAmelCase : str = self._decode_helper(_lowerCamelCase , "bpe" ) _UpperCAmelCase : Optional[int] = self._decode_helper(_lowerCamelCase , "wp" ) _UpperCAmelCase : Tuple = [] _UpperCAmelCase : str = [] for i in range(_lowerCamelCase ): _UpperCAmelCase : Any = [char_scores[i], bpe_scores[i], wp_scores[i]] _UpperCAmelCase : int = [char_strs[i], bpe_strs[i], wp_strs[i]] _UpperCAmelCase : int = scores.index(max(_lowerCamelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _UpperCAmelCase : Any = {} _UpperCAmelCase : Any = final_strs _UpperCAmelCase : Dict = final_scores _UpperCAmelCase : List[str] = char_strs _UpperCAmelCase : int = bpe_strs _UpperCAmelCase : List[str] = wp_strs return out def a__ ( self : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] ): if format == DecodeType.CHARACTER: _UpperCAmelCase : List[str] = self.char_decode _UpperCAmelCase : Union[str, Any] = 1 _UpperCAmelCase : Any = "[s]" elif format == DecodeType.BPE: _UpperCAmelCase : Optional[int] = self.bpe_decode _UpperCAmelCase : List[Any] = 2 _UpperCAmelCase : List[Any] = "#" elif format == DecodeType.WORDPIECE: _UpperCAmelCase : str = self.wp_decode _UpperCAmelCase : Tuple = 1_02 _UpperCAmelCase : List[str] = "[SEP]" else: raise ValueError(f"""Format {format} is not supported.""" ) _UpperCAmelCase : Tuple = [], [] _UpperCAmelCase : List[Any] = pred_logits.size(0 ) _UpperCAmelCase : Optional[Any] = pred_logits.size(1 ) _UpperCAmelCase : List[str] = pred_logits.topk(1 , dim=-1 , largest=_lowerCamelCase , sorted=_lowerCamelCase ) _UpperCAmelCase : Union[str, Any] = preds_index.view(-1 , _lowerCamelCase )[:, 1:] _UpperCAmelCase : List[str] = decoder(_lowerCamelCase ) _UpperCAmelCase : Any = torch.nn.functional.softmax(_lowerCamelCase , dim=2 ).max(dim=2 ) _UpperCAmelCase : Union[str, Any] = preds_max_prob[:, 1:] for index in range(_lowerCamelCase ): _UpperCAmelCase : str = preds_str[index].find(_lowerCamelCase ) _UpperCAmelCase : List[Any] = preds_str[index][:pred_eos] _UpperCAmelCase : str = preds_index[index].cpu().tolist() _UpperCAmelCase : List[str] = pred_index.index(_lowerCamelCase ) if eos_token in pred_index else -1 _UpperCAmelCase : Any = preds_max_prob[index][: pred_eos_index + 1] _UpperCAmelCase : Any = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_lowerCamelCase ) conf_scores.append(_lowerCamelCase ) return dec_strs, conf_scores def a__ ( self : Dict , _lowerCamelCase : Optional[Any] ): _UpperCAmelCase : Tuple = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(_lowerCamelCase )] return decode_strs def a__ ( self : Optional[Any] , _lowerCamelCase : List[Any] ): return self.bpe_tokenizer.batch_decode(_lowerCamelCase ) def a__ ( self : Union[str, Any] , _lowerCamelCase : List[str] ): _UpperCAmelCase : Optional[int] = [seq.replace(" " , "" ) for seq in 
self.wp_tokenizer.batch_decode(_lowerCamelCase )] return decode_strs
718
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class _UpperCamelCase( SCREAMING_SNAKE_CASE ): __A: Optional[Any] = """microsoft/speecht5_tts""" __A: Tuple = ( """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """ """text to read (in English) and returns a waveform object containing the sound.""" ) __A: Any = """text_reader""" __A: Optional[Any] = SpeechTaProcessor __A: int = SpeechTaForTextToSpeech __A: Tuple = SpeechTaHifiGan __A: Optional[Any] = ["""text"""] __A: int = ["""audio"""] def a__ ( self : List[str] ): if self.post_processor is None: _UpperCAmelCase : Union[str, Any] = "microsoft/speecht5_hifigan" super().setup() def a__ ( self : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]=None ): _UpperCAmelCase : Any = self.pre_processor(text=_lowerCamelCase , return_tensors="pt" , truncation=_lowerCamelCase ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("Datasets needs to be installed if not passing speaker embeddings." ) _UpperCAmelCase : str = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" ) _UpperCAmelCase : Optional[Any] = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def a__ ( self : Union[str, Any] , _lowerCamelCase : List[str] ): with torch.no_grad(): return self.model.generate_speech(**_lowerCamelCase ) def a__ ( self : int , _lowerCamelCase : str ): with torch.no_grad(): return self.post_processor(_lowerCamelCase ).cpu().detach()
328
0
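The heart of the processor's `batch_decode` in this row is a simple selection rule: decode the logits with the character, BPE, and WordPiece heads, then keep, per sample, the string whose head reports the highest cumulative confidence. A pure-Python stand-in for that rule (the real method derives the confidences from softmaxed logits):

def pick_best(char, bpe, wp):
    # each argument is a (decoded_string, confidence) pair for one sample
    candidates = [char, bpe, wp]
    scores = [conf for _, conf in candidates]
    best = scores.index(max(scores))
    return candidates[best]

print(pick_best(("ticket", 0.91), ("ticke#", 0.74), ("ticket", 0.88)))
# ('ticket', 0.91)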
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker UpperCamelCase__ = '''CompVis/stable-diffusion-v1-1''' UpperCamelCase__ = '''CompVis/stable-diffusion-v1-2''' UpperCamelCase__ = '''CompVis/stable-diffusion-v1-3''' UpperCamelCase__ = '''CompVis/stable-diffusion-v1-4''' class lowerCamelCase_ ( __a ): def __init__( self : Dict , _A : AutoencoderKL , _A : CLIPTextModel , _A : CLIPTokenizer , _A : UNetaDConditionModel , _A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _A : StableDiffusionSafetyChecker , _A : CLIPImageProcessor , _A : bool = True , ): '''simple docstring''' super()._init_() UpperCAmelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(_A ) UpperCAmelCase__ : List[Any] = StableDiffusionPipeline.from_pretrained(_A ) UpperCAmelCase__ : str = StableDiffusionPipeline.from_pretrained(_A ) UpperCAmelCase__ : str = StableDiffusionPipeline( vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , safety_checker=_A , feature_extractor=_A , requires_safety_checker=_A , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return {k: getattr(self , _A ) for k in self.config.keys() if not k.startswith('''_''' )} def lowercase_ ( self : Union[str, Any] , _A : Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase__ : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' self.enable_attention_slicing(_A ) @torch.no_grad() def lowercase_ ( self : List[Any] , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Tuple , ): '''simple docstring''' return self.pipea( prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , ) @torch.no_grad() def lowercase_ ( self : int , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : List[str] , ): '''simple docstring''' return self.pipea( prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A 
, num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , ) @torch.no_grad() def lowercase_ ( self : Optional[int] , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Any , ): '''simple docstring''' return self.pipea( prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , ) @torch.no_grad() def lowercase_ ( self : Any , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Union[str, Any] , ): '''simple docstring''' return self.pipea( prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , ) @torch.no_grad() def lowercase_ ( self : str , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Optional[Any] , ): '''simple docstring''' UpperCAmelCase__ : Any = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(_A ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 UpperCAmelCase__ : List[Any] = self.textaimg_sda_a( prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , ) # Get first result from Stable Diffusion Checkpoint v1.2 UpperCAmelCase__ : List[Any] = self.textaimg_sda_a( prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , ) # Get first result from Stable Diffusion Checkpoint v1.3 UpperCAmelCase__ : List[Any] = self.textaimg_sda_a( prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , ) # Get first result from Stable Diffusion 
Checkpoint v1.4 UpperCAmelCase__ : List[str] = self.textaimg_sda_a( prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
75
"""simple docstring""" class __A : '''simple docstring''' def __init__( self : List[str] ,_snake_case : int ,_snake_case : str ,_snake_case : Optional[Any] ) -> int: """simple docstring""" lowercase__ : Tuple = None lowercase__ : str = None lowercase__ : Dict = graph self._normalize_graph(_snake_case ,_snake_case ) lowercase__ : Any = len(_snake_case ) lowercase__ : Any = None def UpperCAmelCase ( self : List[Any] ,_snake_case : List[str] ,_snake_case : List[str] ) -> List[str]: """simple docstring""" if sources is int: lowercase__ : Optional[int] = [sources] if sinks is int: lowercase__ : str = [sinks] if len(_snake_case ) == 0 or len(_snake_case ) == 0: return lowercase__ : str = sources[0] lowercase__ : Optional[int] = sinks[0] # make fake vertex if there are more # than one source or sink if len(_snake_case ) > 1 or len(_snake_case ) > 1: lowercase__ : Tuple = 0 for i in sources: max_input_flow += sum(self.graph[i] ) lowercase__ : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 ,0 ) self.graph.insert(0 ,[0] * size ) for i in sources: lowercase__ : Optional[Any] = max_input_flow lowercase__ : Dict = 0 lowercase__ : List[Any] = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: lowercase__ : List[str] = max_input_flow lowercase__ : int = size - 1 def UpperCAmelCase ( self : int ) -> List[str]: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def UpperCAmelCase ( self : str ,_snake_case : List[Any] ) -> int: """simple docstring""" lowercase__ : Tuple = algorithm(self ) class __A : '''simple docstring''' def __init__( self : int ,_snake_case : Tuple ) -> int: """simple docstring""" lowercase__ : int = flow_network lowercase__ : int = flow_network.verticesCount lowercase__ : Tuple = flow_network.sourceIndex lowercase__ : str = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that lowercase__ : Optional[Any] = flow_network.graph lowercase__ : Optional[int] = False def UpperCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" if not self.executed: self._algorithm() lowercase__ : Tuple = True def UpperCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" pass class __A ( A_ ): '''simple docstring''' def __init__( self : int ,_snake_case : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().__init__(_snake_case ) # use this to save your result lowercase__ : Union[str, Any] = -1 def UpperCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class __A ( A_ ): '''simple docstring''' def __init__( self : Union[str, Any] ,_snake_case : Union[str, Any] ) -> List[Any]: """simple docstring""" super().__init__(_snake_case ) lowercase__ : int = [[0] * self.verticies_count for i in range(self.verticies_count )] lowercase__ : List[str] = [0] * self.verticies_count lowercase__ : Tuple = [0] * self.verticies_count def UpperCAmelCase ( self : List[str] ) -> str: """simple docstring""" lowercase__ : str = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): 
self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule lowercase__ : Union[str, Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list lowercase__ : Tuple = 0 while i < len(_snake_case ): lowercase__ : Dict = vertices_list[i] lowercase__ : Optional[Any] = self.heights[vertex_index] self.process_vertex(_snake_case ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 ,vertices_list.pop(_snake_case ) ) lowercase__ : Optional[int] = 0 else: i += 1 lowercase__ : Dict = sum(self.preflow[self.source_index] ) def UpperCAmelCase ( self : Any ,_snake_case : int ) -> List[Any]: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(_snake_case ,_snake_case ) self.relabel(_snake_case ) def UpperCAmelCase ( self : int ,_snake_case : int ,_snake_case : List[str] ) -> Tuple: """simple docstring""" lowercase__ : Tuple = min( self.excesses[from_index] ,self.graph[from_index][to_index] - self.preflow[from_index][to_index] ,) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> Tuple: """simple docstring""" lowercase__ : int = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): lowercase__ : Optional[int] = self.heights[to_index] if min_height is not None: lowercase__ : Optional[int] = min_height + 1 if __name__ == "__main__": lowerCAmelCase_ = [0] lowerCAmelCase_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowerCAmelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowerCAmelCase_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowerCAmelCase_ = flow_network.find_maximum_flow() print(F'''maximum flow is {maximum_flow}''')
560
0
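The push-relabel solver in this row can be sanity-checked with a different max-flow algorithm on the same 4-node network. Below is a compact Edmonds-Karp (BFS augmenting paths) sketch; both algorithms should report a maximum flow of 6 for the example graph:

from collections import deque

def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    total = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:            # no augmenting path remains
            return total
        bottleneck, v = float("inf"), sink
        while v != source:                # find the path's bottleneck
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:                # push flow along the path
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        total += bottleneck

graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
print(edmonds_karp(graph, 0, 3))  # 6, matching the push-relabel result above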
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( snake_case_, unittest.TestCase ): '''simple docstring''' _snake_case = KandinskyVaaPriorPipeline _snake_case = ['''prompt'''] _snake_case = ['''prompt''', '''negative_prompt'''] _snake_case = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] _snake_case = False @property def UpperCAmelCase ( self ): '''simple docstring''' return 3_2 @property def UpperCAmelCase ( self ): '''simple docstring''' return 3_2 @property def UpperCAmelCase ( self ): '''simple docstring''' return self.time_input_dim @property def UpperCAmelCase ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCAmelCase ( self ): '''simple docstring''' return 1_0_0 @property def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModelWithProjection(lowerCamelCase__ ) @property def UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 1_2, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } UpperCamelCase = PriorTransformer(**lowerCamelCase__ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 UpperCamelCase = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , ) UpperCamelCase = CLIPVisionModelWithProjection(lowerCamelCase__ ) return model @property def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=lowerCamelCase__ , do_normalize=lowerCamelCase__ , do_resize=lowerCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , ) return image_processor def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self.dummy_prior UpperCamelCase = self.dummy_image_encoder UpperCamelCase = self.dummy_text_encoder UpperCamelCase = self.dummy_tokenizer UpperCamelCase = self.dummy_image_processor UpperCamelCase = UnCLIPScheduler( 
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=lowerCamelCase__ , clip_sample_range=10.0 , ) UpperCamelCase = { '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__=0 ): '''simple docstring''' if str(lowerCamelCase__ ).startswith('''mps''' ): UpperCamelCase = torch.manual_seed(lowerCamelCase__ ) else: UpperCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) UpperCamelCase = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = '''cpu''' UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase__ ) UpperCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) ) UpperCamelCase = output.image_embeds UpperCamelCase = pipe( **self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0] UpperCamelCase = image[0, -1_0:] UpperCamelCase = image_from_tuple[0, -1_0:] assert image.shape == (1, 3_2) UpperCamelCase = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = torch_device == '''cpu''' UpperCamelCase = True UpperCamelCase = False self._test_inference_batch_single_identical( test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , test_mean_pixel_difference=lowerCamelCase__ , ) @skip_mps def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = torch_device == '''cpu''' UpperCamelCase = False self._test_attention_slicing_forward_pass( test_max_difference=lowerCamelCase__ , test_mean_pixel_difference=lowerCamelCase__ , )
350
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available snake_case_ : List[Any] = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Union[str, Any] = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] snake_case_ : str = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] snake_case_ : Optional[int] = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): snake_case_ : List[str] = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys snake_case_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
350
1
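The `_LazyModule` used in the data2vec `__init__` above defers every heavy import until the attribute is first touched. A compact stand-in for the idea using PEP 562 module-level `__getattr__` (an illustration only, not the transformers implementation):

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Resolve and import the backing module only on first access.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Placed in a package's __init__.py, `from pkg import dumps` would then import `json` only when `dumps` is first requested.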
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __magic_name__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): UpperCamelCase__ = KandinskyVaaPipeline UpperCamelCase__ = [ 'image_embeds', 'negative_image_embeds', ] UpperCamelCase__ = ['image_embeds', 'negative_image_embeds'] UpperCamelCase__ = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] UpperCamelCase__ = False @property def _A( self ): return 32 @property def _A( self ): return 32 @property def _A( self ): return self.time_input_dim @property def _A( self ): return self.time_input_dim * 4 @property def _A( self ): return 1_00 @property def _A( self ): torch.manual_seed(0 ) lowercase ={ '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } lowercase =UNetaDConditionModel(**snake_case_ ) return model @property def _A( self ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _A( self ): torch.manual_seed(0 ) lowercase =VQModel(**self.dummy_movq_kwargs ) return model def _A( self ): lowercase =self.dummy_unet lowercase =self.dummy_movq lowercase =DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=snake_case_ , ) lowercase ={ '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def _A( self , snake_case_ , snake_case_=0 ): lowercase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case_ ) ).to(snake_case_ ) lowercase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case_ ) if str(snake_case_ ).startswith('''mps''' ): lowercase =torch.manual_seed(snake_case_ ) else: lowercase =torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) lowercase ={ '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, 
'''width''': 64, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def _A( self ): lowercase ='''cpu''' lowercase =self.get_dummy_components() lowercase =self.pipeline_class(**snake_case_ ) lowercase =pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) lowercase =pipe(**self.get_dummy_inputs(snake_case_ ) ) lowercase =output.images lowercase =pipe( **self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0] lowercase =image[0, -3:, -3:, -1] lowercase =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase =np.array( [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase ): def _A( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A( self ): lowercase =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' ) lowercase =KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case_ ) lowercase =KandinskyVaaPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) lowercase =pipeline.to(snake_case_ ) pipeline.set_progress_bar_config(disable=snake_case_ ) lowercase ='''red cat, 4k photo''' lowercase =torch.Generator(device='''cuda''' ).manual_seed(0 ) lowercase , lowercase =pipe_prior( snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() lowercase =torch.Generator(device='''cuda''' ).manual_seed(0 ) lowercase =pipeline( image_embeds=snake_case_ , negative_image_embeds=snake_case_ , generator=snake_case_ , num_inference_steps=1_00 , output_type='''np''' , ) lowercase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(snake_case_ , snake_case_ )
72
'''simple docstring''' from math import pi, sqrt, tan def UpperCamelCase ( lowercase_ : float ) -> float: '''simple docstring''' if side_length < 0: raise ValueError('''surface_area_cube() only accepts non-negative values''' ) return 6 * side_length**2 def UpperCamelCase ( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if length < 0 or breadth < 0 or height < 0: raise ValueError('''surface_area_cuboid() only accepts non-negative values''' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def UpperCamelCase ( lowercase_ : float ) -> float: '''simple docstring''' if radius < 0: raise ValueError('''surface_area_sphere() only accepts non-negative values''' ) return 4 * pi * radius**2 def UpperCamelCase ( lowercase_ : float ) -> float: '''simple docstring''' if radius < 0: raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' ) return 3 * pi * radius**2 def UpperCamelCase ( lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if radius < 0 or height < 0: raise ValueError('''surface_area_cone() only accepts non-negative values''' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def UpperCamelCase ( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( '''surface_area_conical_frustum() only accepts non-negative values''' ) lowercase =(height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def UpperCamelCase ( lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if radius < 0 or height < 0: raise ValueError('''surface_area_cylinder() only accepts non-negative values''' ) return 2 * pi * radius * (height + radius) def UpperCamelCase ( lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if torus_radius < 0 or tube_radius < 0: raise ValueError('''surface_area_torus() only accepts non-negative values''' ) if torus_radius < tube_radius: raise ValueError( '''surface_area_torus() does not support spindle or self intersecting tori''' ) return 4 * pow(lowercase_ , 2 ) * torus_radius * tube_radius def UpperCamelCase ( lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if length < 0 or width < 0: raise ValueError('''area_rectangle() only accepts non-negative values''' ) return length * width def UpperCamelCase ( lowercase_ : float ) -> float: '''simple docstring''' if side_length < 0: raise ValueError('''area_square() only accepts non-negative values''' ) return side_length**2 def UpperCamelCase ( lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if base < 0 or height < 0: raise ValueError('''area_triangle() only accepts non-negative values''' ) return (base * height) / 2 def UpperCamelCase ( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('''Given three sides do not form a triangle''' ) lowercase =(sidea + sidea + sidea) / 2 lowercase =sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def UpperCamelCase ( lowercase_ : float , lowercase_ : float ) -> float: '''simple 
docstring''' if base < 0 or height < 0: raise ValueError('''area_parallelogram() only accepts non-negative values''' ) return base * height def UpperCamelCase ( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if basea < 0 or basea < 0 or height < 0: raise ValueError('''area_trapezium() only accepts non-negative values''' ) return 1 / 2 * (basea + basea) * height def UpperCamelCase ( lowercase_ : float ) -> float: '''simple docstring''' if radius < 0: raise ValueError('''area_circle() only accepts non-negative values''' ) return pi * radius**2 def UpperCamelCase ( lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if radius_x < 0 or radius_y < 0: raise ValueError('''area_ellipse() only accepts non-negative values''' ) return pi * radius_x * radius_y def UpperCamelCase ( lowercase_ : float , lowercase_ : float ) -> float: '''simple docstring''' if diagonal_a < 0 or diagonal_a < 0: raise ValueError('''area_rhombus() only accepts non-negative values''' ) return 1 / 2 * diagonal_a * diagonal_a def UpperCamelCase ( lowercase_ : int , lowercase_ : float ) -> float: '''simple docstring''' if not isinstance(lowercase_ , lowercase_ ) or sides < 3: raise ValueError( '''area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides''' ) elif length < 0: raise ValueError( '''area_reg_polygon() only accepts non-negative values as \ length of a side''' ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('''[DEMO] Areas of various geometric shapes: \n''') print(F"""Rectangle: {area_rectangle(10, 20) = }""") print(F"""Square: {area_square(10) = }""") print(F"""Triangle: {area_triangle(10, 10) = }""") print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""") print(F"""Parallelogram: {area_parallelogram(10, 20) = }""") print(F"""Rhombus: {area_rhombus(10, 20) = }""") print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""") print(F"""Circle: {area_circle(20) = }""") print(F"""Ellipse: {area_ellipse(10, 20) = }""") print('''\nSurface Areas of various geometric shapes: \n''') print(F"""Cube: {surface_area_cube(20) = }""") print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""") print(F"""Sphere: {surface_area_sphere(20) = }""") print(F"""Hemisphere: {surface_area_hemisphere(20) = }""") print(F"""Cone: {surface_area_cone(10, 20) = }""") print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""") print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""") print(F"""Torus: {surface_area_torus(20, 10) = }""") print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""") print(F"""Square: {area_reg_polygon(4, 10) = }""") print(F"""Regular Pentagon: {area_reg_polygon(5, 10) = }""")
72
1
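The geometry cell's `area_triangle_three_sides` is Heron's formula: s = (a + b + c) / 2 and area = sqrt(s(s-a)(s-b)(s-c)). A quick worked check: (5, 12, 13) is a right triangle, so Heron must agree with base * height / 2 = 30.

from math import sqrt

a, b, c = 5, 12, 13
s = (a + b + c) / 2
print(sqrt(s * (s - a) * (s - b) * (s - c)), a * b / 2)  # 30.0 30.0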
'''simple docstring'''


def solution(n: int = 1_0_0_0) -> int:
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
564
'''simple docstring'''

from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every vector in value_array, find the closest vector in dataset."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
564
1
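The one-line solution in the previous record sums the closed form 2·a·⌊(a−1)/2⌋ for a = 3..n, which equals the maximum of ((a−1)ⁿ + (a+1)ⁿ) mod a². A brute-force cross-check of that identity (an illustrative sketch with readable names; n_limit is an arbitrary search bound, not from the row):

def r_max_bruteforce(a: int, n_limit: int = 1_000) -> int:
    # maximum square remainder of (a-1)**n + (a+1)**n modulo a**2
    return max(((a - 1) ** n + (a + 1) ** n) % a**2 for n in range(1, n_limit))

for a in range(3, 50):
    assert r_max_bruteforce(a) == 2 * a * ((a - 1) // 2), a
print("closed form 2*a*((a-1)//2) matches brute force for 3 <= a < 50")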
"""simple docstring""" import requests UpperCAmelCase ='''YOUR API KEY''' def _A ( _a : str , _a : str = giphy_api_key ): """simple docstring""" A = """+""".join(query.split() ) A = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}' A = requests.get(__lowercase ).json()["""data"""] return [gif["url"] for gif in gifs] if __name__ == "__main__": print("\n".join(get_gifs("space ship")))
617
'''simple docstring''' import logging import os import threading import time try: import warnings except ImportError: a__ : str =None try: import msvcrt except ImportError: a__ : List[str] =None try: import fcntl except ImportError: a__ : Any =None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a__ : Dict =OSError # Data # ------------------------------------------------ a__ : str =[ '''Timeout''', '''BaseFileLock''', '''WindowsFileLock''', '''UnixFileLock''', '''SoftFileLock''', '''FileLock''', ] a__ : Union[str, Any] ='''3.0.12''' a__ : Union[str, Any] =None def lowercase__ ( ) -> Tuple: """simple docstring""" global _logger __UpperCamelCase = _logger or logging.getLogger(__name__ ) return _logger class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : Optional[Any] , __A : str ): __UpperCamelCase = lock_file return None def __str__( self : Any ): __UpperCamelCase = f'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class snake_case : """simple docstring""" def __init__( self : List[Any] , __A : Union[str, Any] ): __UpperCamelCase = lock return None def __enter__( self : int ): return self.lock def __exit__( self : List[str] , __A : int , __A : Dict , __A : List[Any] ): self.lock.release() return None class snake_case : """simple docstring""" def __init__( self : Optional[int] , __A : Optional[Any] , __A : str=-1 , __A : Any=None ): __UpperCamelCase = max_filename_length if max_filename_length is not None else 2_5_5 # Hash the filename if it's too long __UpperCamelCase = self.hash_filename_if_too_long(__A , __A ) # The path to the lock file. __UpperCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __UpperCamelCase = None # The default timeout value. __UpperCamelCase = timeout # We use this lock primarily for the lock counter. __UpperCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __UpperCamelCase = 0 return None @property def _lowerCamelCase ( self : List[Any] ): return self._lock_file @property def _lowerCamelCase ( self : Optional[int] ): return self._timeout @timeout.setter def _lowerCamelCase ( self : Any , __A : Optional[Any] ): __UpperCamelCase = float(__A ) return None def _lowerCamelCase ( self : Tuple ): raise NotImplementedError() def _lowerCamelCase ( self : int ): raise NotImplementedError() @property def _lowerCamelCase ( self : Tuple ): return self._lock_file_fd is not None def _lowerCamelCase ( self : List[str] , __A : int=None , __A : str=0.05 ): # Use the default timeout, if no timeout is provided. if timeout is None: __UpperCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __UpperCamelCase = id(self ) __UpperCamelCase = self._lock_file __UpperCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(__A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __UpperCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def _lowerCamelCase ( self : str , __A : str=False ): with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __UpperCamelCase = id(self ) __UpperCamelCase = self._lock_file logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __UpperCamelCase = 0 logger().debug(f'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : Dict ): self.acquire() return self def __exit__( self : str , __A : Optional[int] , __A : List[Any] , __A : str ): self.release() return None def __del__( self : Any ): self.release(force=__A ) return None def _lowerCamelCase ( self : Any , __A : str , __A : int ): __UpperCamelCase = os.path.basename(__A ) if len(__A ) > max_length and max_length > 0: __UpperCamelCase = os.path.dirname(__A ) __UpperCamelCase = str(hash(__A ) ) __UpperCamelCase = filename[: max_length - len(__A ) - 8] + '...' + hashed_filename + '.lock' return os.path.join(__A , __A ) else: return path class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : Optional[int] , __A : Optional[Any] , __A : Optional[Any]=-1 , __A : Dict=None ): from .file_utils import relative_to_absolute_path super().__init__(__A , timeout=__A , max_filename_length=__A ) __UpperCamelCase = '\\\\?\\' + relative_to_absolute_path(self.lock_file ) def _lowerCamelCase ( self : Any ): __UpperCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __UpperCamelCase = os.open(self._lock_file , __A ) except OSError: pass else: try: msvcrt.locking(__A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__A ) else: __UpperCamelCase = fd return None def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self._lock_file_fd __UpperCamelCase = None msvcrt.locking(__A , msvcrt.LK_UNLCK , 1 ) os.close(__A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : List[str] , __A : List[str] , __A : Any=-1 , __A : Union[str, Any]=None ): __UpperCamelCase = os.statvfs(os.path.dirname(__A ) ).f_namemax super().__init__(__A , timeout=__A , max_filename_length=__A ) def _lowerCamelCase ( self : int ): __UpperCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __UpperCamelCase = os.open(self._lock_file , __A ) try: fcntl.flock(__A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__A ) else: __UpperCamelCase = fd return None def _lowerCamelCase ( self : Dict ): # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __UpperCamelCase = self._lock_file_fd __UpperCamelCase = None fcntl.flock(__A , fcntl.LOCK_UN ) os.close(__A ) return None class snake_case ( __lowerCamelCase ): """simple docstring""" def _lowerCamelCase ( self : str ): __UpperCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __UpperCamelCase = os.open(self._lock_file , __A ) except OSError: pass else: __UpperCamelCase = fd return None def _lowerCamelCase ( self : str ): os.close(self._lock_file_fd ) __UpperCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a__ : Optional[Any] =None if msvcrt: a__ : Any =WindowsFileLock elif fcntl: a__ : Union[str, Any] =UnixFileLock else: a__ : Dict =SoftFileLock if warnings is not None: warnings.warn('''only soft file lock is available''')
399
0
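The file-lock classes in the record above mirror the py-filelock package; a typical usage pattern looks like the sketch below (assumes the module is importable as filelock, and the file names are placeholders):

from filelock import FileLock, Timeout  # assumption: module installed/saved as `filelock`

lock = FileLock("high_ground.txt.lock", timeout=1)
try:
    with lock:  # blocks for up to 1 s, then raises Timeout if the lock is held elsewhere
        with open("high_ground.txt", "a") as f:
            f.write("hello\n")
except Timeout:
    print("Another process currently holds the lock.")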
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase_ ( __snake_case : Union[str, Any] ) -> int: '''simple docstring''' snake_case__ :List[str] = args.pruning_method snake_case__ :Any = args.threshold snake_case__ :str = args.model_name_or_path.rstrip("/" ) snake_case__ :List[Any] = args.target_model_path print(F'Load fine-pruned model from {model_name_or_path}' ) snake_case__ :Optional[Any] = torch.load(os.path.join(lowerCAmelCase__ , "pytorch_model.bin" ) ) snake_case__ :List[str] = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: snake_case__ :Dict = tensor print(F'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: snake_case__ :str = tensor print(F'Copied layer {name}' ) elif "bias" in name: snake_case__ :Tuple = tensor print(F'Copied layer {name}' ) else: if pruning_method == "magnitude": snake_case__ :Dict = MagnitudeBinarizer.apply(inputs=lowerCAmelCase__ , threshold=lowerCAmelCase__ ) snake_case__ :Tuple = tensor * mask print(F'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue snake_case__ :Dict = name[:-6] snake_case__ :Union[str, Any] = model[F'{prefix_}mask_scores'] snake_case__ :Any = TopKBinarizer.apply(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case__ :str = tensor * mask print(F'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue snake_case__ :Optional[int] = name[:-6] snake_case__ :Dict = model[F'{prefix_}mask_scores'] snake_case__ :List[str] = ThresholdBinarizer.apply(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) snake_case__ :List[str] = tensor * mask print(F'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue snake_case__ :Optional[Any] = name[:-6] snake_case__ :Optional[Any] = model[F'{prefix_}mask_scores'] snake_case__ , snake_case__ :Optional[int] = -0.1, 1.1 snake_case__ :Optional[Any] = torch.sigmoid(lowerCAmelCase__ ) snake_case__ :int = s * (r - l) + l snake_case__ :List[Any] = s_bar.clamp(min=0.0 , max=1.0 ) snake_case__ :str = tensor * mask print(F'Pruned layer {name}' ) else: raise ValueError("Unknown pruning method" ) if target_model_path is None: snake_case__ :Tuple = os.path.join( os.path.dirname(lowerCAmelCase__ ) , F'bertarized_{os.path.basename(lowerCAmelCase__ )}' ) if not os.path.isdir(lowerCAmelCase__ ): shutil.copytree(lowerCAmelCase__ , lowerCAmelCase__ ) print(F'\nCreated folder {target_model_path}' ) torch.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "pytorch_model.bin" ) ) print("\nPruned model saved! See you later!" ) if __name__ == "__main__": __UpperCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." 
"Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __UpperCAmelCase : str = parser.parse_args() main(args)
705
def lowercase_ ( __snake_case : int ) -> bool: '''simple docstring''' if p < 2: raise ValueError("p should not be less than 2!" ) elif p == 2: return True snake_case__ :List[str] = 4 snake_case__ :Optional[int] = (1 << p) - 1 for _ in range(p - 2 ): snake_case__ :List[Any] = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(1_1))
57
0
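The second row of the record above is the Lucas-Lehmer primality test for Mersenne numbers 2^p − 1. A self-contained sketch with readable names that enumerates the small Mersenne prime exponents (the prime pre-filter is plain trial division):

def is_prime(n: int) -> bool:
    return n >= 2 and all(n % d for d in range(2, int(n**0.5) + 1))

def lucas_lehmer(p: int) -> bool:
    if p == 2:
        return True  # 2**2 - 1 = 3 is prime
    s, m = 4, (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0

print([p for p in range(2, 62) if is_prime(p) and lucas_lehmer(p)])
# expected: [2, 3, 5, 7, 13, 17, 19, 31, 61]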
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class __UpperCAmelCase : """simple docstring""" @staticmethod def __lowerCAmelCase ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" pass def __magic_name__ ( lowercase_ ) -> Any: '''simple docstring''' return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __a : Optional[int] = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" lowercase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" UpperCamelCase = pipeline( "document-question-answering" , model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) UpperCamelCase = INVOICE_URL UpperCamelCase = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , "" ) ) ) UpperCamelCase = "What is the placebo?" UpperCamelCase = [ { "image": load_image(SCREAMING_SNAKE_CASE ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" UpperCamelCase = dqa_pipeline(SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual( SCREAMING_SNAKE_CASE , [ [ {"score": ANY(SCREAMING_SNAKE_CASE ), "answer": ANY(SCREAMING_SNAKE_CASE ), "start": ANY(SCREAMING_SNAKE_CASE ), "end": ANY(SCREAMING_SNAKE_CASE )}, {"score": ANY(SCREAMING_SNAKE_CASE ), "answer": ANY(SCREAMING_SNAKE_CASE ), "start": ANY(SCREAMING_SNAKE_CASE ), "end": ANY(SCREAMING_SNAKE_CASE )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) UpperCamelCase = INVOICE_URL UpperCamelCase = "How many cats are there?" UpperCamelCase = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] UpperCamelCase = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , SCREAMING_SNAKE_CASE ) UpperCamelCase = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , SCREAMING_SNAKE_CASE ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably UpperCamelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" UpperCamelCase = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual(SCREAMING_SNAKE_CASE , [] ) # We can optionally pass directly the words and bounding boxes UpperCamelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , words=SCREAMING_SNAKE_CASE , boxes=SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual(SCREAMING_SNAKE_CASE , [] ) @slow @require_torch @require_detectrona @require_pytesseract def __lowerCAmelCase ( self ) -> str: """simple docstring""" UpperCamelCase = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) UpperCamelCase = INVOICE_URL UpperCamelCase = "What is the invoice number?" UpperCamelCase = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) UpperCamelCase = INVOICE_URL UpperCamelCase = "What is the invoice number?" 
UpperCamelCase = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=SCREAMING_SNAKE_CASE ) UpperCamelCase = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=SCREAMING_SNAKE_CASE , revision="3dc6de3" , ) UpperCamelCase = INVOICE_URL UpperCamelCase = "What is the invoice number?" UpperCamelCase = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) UpperCamelCase = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) UpperCamelCase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) UpperCamelCase = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , "" ) ) ) # This model should also work if `image` is set to None UpperCamelCase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=SCREAMING_SNAKE_CASE ) UpperCamelCase = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=SCREAMING_SNAKE_CASE , revision="3dc6de3" , max_seq_len=50 , ) UpperCamelCase = INVOICE_URL UpperCamelCase = "What is the invoice number?" 
UpperCamelCase = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) UpperCamelCase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) UpperCamelCase = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , "" ) ) ) # This model should also work if `image` is set to None UpperCamelCase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) UpperCamelCase = INVOICE_URL UpperCamelCase = "What is the invoice number?" UpperCamelCase = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" pass
606
from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. __a : Optional[int] = 1_0 def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' for i in range(lowercase_ , lowercase_ ): if array[i] == target: return i return -1 def __magic_name__ ( lowercase_ , lowercase_ ) -> int: '''simple docstring''' UpperCamelCase = 0 UpperCamelCase = len(lowercase_ ) while left <= right: if right - left < precision: return lin_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) UpperCamelCase = (left + right) // 3 + 1 UpperCamelCase = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: UpperCamelCase = one_third - 1 elif array[two_third] < target: UpperCamelCase = two_third + 1 else: UpperCamelCase = one_third + 1 UpperCamelCase = two_third - 1 else: return -1 def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' if left < right: if right - left < precision: return lin_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) UpperCamelCase = (left + right) // 3 + 1 UpperCamelCase = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(lowercase_ , one_third - 1 , lowercase_ , lowercase_ ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , lowercase_ , lowercase_ , lowercase_ ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , lowercase_ , lowercase_ ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() __a : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip() __a : Tuple = [int(item.strip()) for item in user_input.split(""",""")] assert collection == sorted(collection), F"List must be ordered.\n{collection}." __a : Optional[Any] = int(input("""Enter the number to be found in the list:\n""").strip()) __a : Optional[Any] = ite_ternary_search(collection, target) __a : Tuple = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(F'Iterative search: {target} found at positions: {resulta}') print(F'Recursive search: {target} found at positions: {resulta}') else: print("""Not found""")
606
1
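The test file in the record above exercises the document-question-answering pipeline. A standalone sketch of the same call (it downloads the real impira/layoutlm-document-qa checkpoint used in the tests and needs Pillow plus pytesseract at runtime; the image path is a placeholder):

from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
print(dqa(image="invoice.png", question="What is the invoice number?", top_k=2))
# each answer is a dict with "score", "answer", "start" and "end" keys, as asserted above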
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class _lowercase ( __UpperCAmelCase ): lowercase_ = 'gpt_bigcode' lowercase_ = ['past_key_values'] lowercase_ = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , UpperCAmelCase_=50257 , UpperCAmelCase_=1024 , UpperCAmelCase_=768 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=None , UpperCAmelCase_="gelu_pytorch_tanh" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=0.02 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=50256 , UpperCAmelCase_=50256 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=True , **UpperCAmelCase_ , ) -> List[Any]: lowerCamelCase : List[Any] = vocab_size lowerCamelCase : List[str] = n_positions lowerCamelCase : Optional[Any] = n_embd lowerCamelCase : Dict = n_layer lowerCamelCase : int = n_head lowerCamelCase : Dict = n_inner lowerCamelCase : int = activation_function lowerCamelCase : List[str] = resid_pdrop lowerCamelCase : Tuple = embd_pdrop lowerCamelCase : str = attn_pdrop lowerCamelCase : List[Any] = layer_norm_epsilon lowerCamelCase : Any = initializer_range lowerCamelCase : Optional[int] = scale_attn_weights lowerCamelCase : Optional[Any] = use_cache lowerCamelCase : Any = attention_softmax_in_fpaa lowerCamelCase : str = scale_attention_softmax_in_fpaa lowerCamelCase : Optional[int] = multi_query lowerCamelCase : Optional[int] = bos_token_id lowerCamelCase : Optional[Any] = eos_token_id super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
133
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( a_, a_ ): '''simple docstring''' print(F"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(a_ ): print(F"""{i}\t\t{d}""" ) def UpperCAmelCase ( a_, a_, a_ ): '''simple docstring''' for j in range(a_ ): lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = (graph[j][k] for k in ['src', 'dst', 'weight']) if distance[u] != float('inf' ) and distance[u] + w < distance[v]: return True return False def UpperCAmelCase ( a_, a_, a_, a_ ): '''simple docstring''' lowerCamelCase : str = [float('inf' )] * vertex_count lowerCamelCase : str = 0.0 for _ in range(vertex_count - 1 ): for j in range(a_ ): lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = (graph[j][k] for k in ['src', 'dst', 'weight']) if distance[u] != float('inf' ) and distance[u] + w < distance[v]: lowerCamelCase : Dict = distance[u] + w lowerCamelCase : Any = check_negative_cycle(a_, a_, a_ ) if negative_cycle_exists: raise Exception('Negative cycle found' ) return distance if __name__ == "__main__": import doctest doctest.testmod() _A = int(input('Enter number of vertices: ').strip()) _A = int(input('Enter number of edges: ').strip()) _A = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) _A , _A , _A = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) _A = {'src': src, 'dst': dest, 'weight': weight} _A = int(input('\nEnter shortest path source:').strip()) _A = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
133
1
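A non-interactive sketch of the Bellman-Ford routine from the second row of the record above, run on a fixed three-vertex graph (readable names; the negative-cycle check is omitted for brevity, and the edge-dict schema matches the row):

def bellman_ford(graph: list, vertex_count: int, src: int) -> list:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):  # relax every edge V - 1 times
        for edge in graph:
            u, v, w = edge["src"], edge["dst"], edge["weight"]
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    return distance

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
print(bellman_ford(edges, 3, 0))  # -> [0.0, 3.0, 1.0]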
"""simple docstring""" import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece.model") __A = get_tests_dir("fixtures/test_sentencepiece_bpe.model") __A = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( lowerCamelCase_ , unittest.TestCase ): a__ : Optional[Any] = CamembertTokenizer a__ : Dict = CamembertTokenizerFast a__ : Any = True a__ : str = True def lowerCamelCase_ ( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case : List[Any] = CamembertTokenizer(SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Any = "<pad>" snake_case : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>NOTUSED" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1_004 ) def lowerCamelCase_ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_005 ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : List[str] = CamembertTokenizer(SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) snake_case : Tuple = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) snake_case : Union[str, Any] = "I was born in 92000, and this is falsé." snake_case : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE ) snake_case : List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) snake_case : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) snake_case : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) snake_case : int = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ) snake_case : Any = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return snake_case : Union[str, Any] = self.get_tokenizer() snake_case : List[str] = self.get_rust_tokenizer() snake_case : int = "I was born in 92000, and this is falsé." 
snake_case : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE ) snake_case : Optional[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) snake_case : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) snake_case : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) snake_case : Tuple = self.get_rust_tokenizer() snake_case : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE ) snake_case : Optional[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = {"input_ids": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. snake_case : int = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=SCREAMING_SNAKE_CASE , )
134
"""simple docstring""" import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) __A = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class lowerCamelCase__ ( unittest.TestCase ): def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None ): """simple docstring""" snake_case : int = None snake_case : Dict = os.path.abspath(os.path.join("examples" , "by_feature" ) ) snake_case : Optional[int] = os.path.abspath("examples" ) for item in os.listdir(SCREAMING_SNAKE_CASE ): if item not in EXCLUDE_EXAMPLES: snake_case : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if os.path.isfile(SCREAMING_SNAKE_CASE ) and ".py" in item_path: with self.subTest( tested_script=SCREAMING_SNAKE_CASE , feature_script=SCREAMING_SNAKE_CASE , tested_section="main()" if parser_only else "training_function()" , ): snake_case : Dict = compare_against_test( os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) snake_case : str = "\n".join(SCREAMING_SNAKE_CASE ) if special_strings is not None: for string in special_strings: snake_case : int = diff.replace(SCREAMING_SNAKE_CASE , "" ) self.assertEqual(SCREAMING_SNAKE_CASE , "" ) def lowerCamelCase_ ( self ): """simple docstring""" self.one_complete_example("complete_nlp_example.py" , SCREAMING_SNAKE_CASE ) self.one_complete_example("complete_nlp_example.py" , SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Tuple = os.path.abspath(os.path.join("examples" , "cv_example.py" ) ) snake_case : Tuple = [ " " * 16 + "{\n\n", " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n", " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n", " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n", " " * 20 + "\"epoch\": epoch,\n\n", " " * 16 + "},\n\n", " " * 16 + "step=epoch,\n", " " * 12, " " * 8 + "for step, batch in enumerate(active_dataloader):\n", ] self.one_complete_example("complete_cv_example.py" , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.one_complete_example("complete_cv_example.py" , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class lowerCamelCase__ ( lowerCamelCase_ ): a__ : Dict = False @classmethod def lowerCamelCase_ ( cls ): """simple docstring""" super().setUpClass() snake_case : int = tempfile.mkdtemp() snake_case : Optional[int] = os.path.join(cls._tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) snake_case : Dict = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def lowerCamelCase_ ( cls ): """simple docstring""" super().tearDownClass() shutil.rmtree(cls._tmpdir ) def lowerCamelCase_ ( self ): """simple 
docstring""" snake_case : str = F''' examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Union[str, Any] = F''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() snake_case : Union[str, Any] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} '''.split() snake_case : Tuple = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE ) self.assertNotIn("epoch 0:" , SCREAMING_SNAKE_CASE ) self.assertIn("epoch 1:" , SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : int = F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} '''.split() snake_case : Tuple = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE ) if torch.cuda.is_available(): snake_case : Optional[Any] = torch.cuda.device_count() else: snake_case : str = 1 if num_processes > 1: self.assertNotIn("epoch 0:" , SCREAMING_SNAKE_CASE ) self.assertIn("epoch 1:" , SCREAMING_SNAKE_CASE ) else: self.assertIn("epoch 0:" , SCREAMING_SNAKE_CASE ) self.assertIn("epoch 1:" , SCREAMING_SNAKE_CASE ) @slow def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split() with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ): snake_case : Optional[Any] = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE ) snake_case : Tuple = re.findall("({.+})" , SCREAMING_SNAKE_CASE ) snake_case : str = [r for r in results if "accuracy" in r][-1] snake_case : List[str] = ast.literal_eval(SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(results["accuracy"] , 0.75 ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = ["examples/by_feature/multi_process_metrics.py"] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def lowerCamelCase_ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: snake_case : Union[str, Any] = F''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , "tracking" ) ) ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : List[Any] = ["examples/by_feature/gradient_accumulation.py"] run_command(self._launch_args + testargs ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Tuple = ["examples/by_feature/local_sgd.py"] run_command(self._launch_args + testargs )
134
1
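Outside the test harness, the slow/fast tokenizer parity asserted in the first row of the record above can be reproduced directly (downloads the real camembert-base checkpoint):

from transformers import CamembertTokenizer, CamembertTokenizerFast

slow_tok = CamembertTokenizer.from_pretrained("camembert-base")
fast_tok = CamembertTokenizerFast.from_pretrained("camembert-base")
text = "I was born in 92000, and this is falsé."
assert slow_tok.encode(text) == fast_tok.encode(text)  # slow/fast parity, as in the test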
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""", } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : str = "mgp-str" def __init__( self : Tuple , _UpperCAmelCase : int=[32, 1_28] , _UpperCAmelCase : str=4 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : str=27 , _UpperCAmelCase : List[str]=38 , _UpperCAmelCase : Optional[Any]=5_02_57 , _UpperCAmelCase : Union[str, Any]=3_05_22 , _UpperCAmelCase : List[str]=7_68 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : Dict=4.0 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Union[str, Any]=0.02 , **_UpperCAmelCase : int , ) -> List[Any]: """simple docstring""" super().__init__(**_UpperCAmelCase ) __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = max_token_length __lowercase = num_character_labels __lowercase = num_bpe_labels __lowercase = num_wordpiece_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = mlp_ratio __lowercase = distilled __lowercase = layer_norm_eps __lowercase = drop_rate __lowercase = qkv_bias __lowercase = attn_drop_rate __lowercase = drop_path_rate __lowercase = output_aa_attentions __lowercase = initializer_range
688
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = embedding_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_hidden_groups __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope def a__ ( self : Any ) -> List[Any]: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase = ids_tensor([self.batch_size] , self.num_choices ) __lowercase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]: """simple docstring""" __lowercase = AlbertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple: """simple docstring""" __lowercase = AlbertForPreTraining(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = AlbertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int: """simple docstring""" __lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any: """simple docstring""" __lowercase = self.num_labels __lowercase = AlbertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , 
labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]: """simple docstring""" __lowercase = self.num_labels __lowercase = AlbertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int: """simple docstring""" __lowercase = self.num_choices __lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self : Tuple ) -> str: """simple docstring""" __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : int = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ : Dict = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ : Optional[Any] = True def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple: """simple docstring""" __lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase ) if return_labels: if model_class in get_values(_UpperCAmelCase ): __lowercase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase ) __lowercase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase ) return inputs_dict def a__ ( self : str ) -> str: """simple docstring""" __lowercase = AlbertModelTester(self ) __lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def a__ ( self : Any ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Tuple 
) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase ) def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a__ ( self : int ) -> List[Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a__ ( self : Tuple ) -> Any: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase = type self.model_tester.create_and_check_model(*_UpperCAmelCase ) @slow def a__ ( self : int ) -> Any: """simple docstring""" for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = AlbertModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @require_torch class A__ ( unittest.TestCase ): @slow def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase = AlbertModel.from_pretrained('albert-base-v2' ) __lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] __lowercase = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , _UpperCAmelCase ) __lowercase = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
688
1
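The first row of the record above defines the mgp-str configuration; a minimal instantiation sketch (assumes the class is exported from transformers under its upstream name MgpstrConfig):

from transformers import MgpstrConfig  # assumption: upstream export name for this config

config = MgpstrConfig(num_hidden_layers=2, num_attention_heads=2)
print(config.model_type)        # "mgp-str"
print(config.max_token_length)  # 27 by default, per the __init__ in the row above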
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    # The scrambled source inherited from an undefined `snake_case__` name; the
    # base classes here are restored to the conventional `PretrainedConfig` /
    # `OnnxConfig` pair the imports at the top already pull in.
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
137
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# The scrambled source reassigned a single `_lowerCAmelCase` name in every branch
# below, leaving `_import_structure` undefined at the `_LazyModule` call; the
# conventional keyed dict is restored here.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
137
1
"""simple docstring""" import math import tensorflow as tf from packaging import version def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = tf.convert_to_tensor(__lowercase ) lowerCamelCase__ = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = tf.convert_to_tensor(__lowercase ) lowerCamelCase__ = tf.cast(math.pi , x.dtype ) lowerCamelCase__ = tf.cast(0.04_47_15 , x.dtype ) lowerCamelCase__ = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__lowercase , 3 )) )) return x * cdf def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = tf.convert_to_tensor(__lowercase ) return x * tf.tanh(tf.math.softplus(__lowercase ) ) def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = tf.convert_to_tensor(__lowercase ) lowerCamelCase__ = tf.cast(0.04_47_15 , x.dtype ) lowerCamelCase__ = tf.cast(0.79_78_84_56_08 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = tf.convert_to_tensor(__lowercase ) lowerCamelCase__ = tf.cast(1.7_02 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def _A ( __lowercase ): """simple docstring""" return tf.clip_by_value(_gelu(__lowercase ) , -10 , 10 ) def _A ( __lowercase , __lowercase=-1 ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ = tf.split(__lowercase , 2 , axis=__lowercase ) return a * tf.math.sigmoid(__lowercase ) if version.parse(tf.version.VERSION) >= version.parse("""2.4"""): def _A ( __lowercase ): """simple docstring""" return tf.keras.activations.gelu(__lowercase , approximate=__lowercase ) __magic_name__ = tf.keras.activations.gelu __magic_name__ = approximate_gelu_wrap else: __magic_name__ = _gelu __magic_name__ = _gelu_new __magic_name__ = { """gelu""": gelu, """gelu_10""": gelu_aa, """gelu_fast""": gelu_fast, """gelu_new""": gelu_new, """glu""": glu, """mish""": mish, """quick_gelu""": quick_gelu, """relu""": tf.keras.activations.relu, """sigmoid""": tf.keras.activations.sigmoid, """silu""": tf.keras.activations.swish, """swish""": tf.keras.activations.swish, """tanh""": tf.keras.activations.tanh, } def _A ( __lowercase ): """simple docstring""" if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
258
"""simple docstring""" from itertools import count def _A ( __lowercase = 50 ): """simple docstring""" lowerCamelCase__ = [1] * min_block_length for n in count(__lowercase ): fill_count_functions.append(1 ) for block_length in range(__lowercase , n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 100_0000: break return n if __name__ == "__main__": print(F'{solution() = }')
258
1
"""simple docstring""" # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys _lowerCAmelCase : Union[str, Any] = '''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
46
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { '''microsoft/table-transformer-detection''': ( '''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json''' ), } class snake_case_ ( _lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_: int = """table-transformer""" SCREAMING_SNAKE_CASE_: int = ["""past_key_values"""] SCREAMING_SNAKE_CASE_: int = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , __a=True , __a=None , __a=3 , __a=100 , __a=6 , __a=2048 , __a=8 , __a=6 , __a=2048 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=256 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) A__ = CONFIG_MAPPING['resnet'](out_features=['stage4'] ) elif isinstance(__a , __a ): A__ = backbone_config.get('model_type' ) A__ = CONFIG_MAPPING[backbone_model_type] A__ = config_class.from_dict(__a ) # set timm attributes to None A__ , A__ , A__ = None, None, None A__ = use_timm_backbone A__ = backbone_config A__ = num_channels A__ = num_queries A__ = d_model A__ = encoder_ffn_dim A__ = encoder_layers A__ = encoder_attention_heads A__ = decoder_ffn_dim A__ = decoder_layers A__ = decoder_attention_heads A__ = dropout A__ = attention_dropout A__ = activation_dropout A__ = activation_function A__ = init_std A__ = init_xavier_std A__ = encoder_layerdrop A__ = decoder_layerdrop A__ = encoder_layers A__ = auxiliary_loss A__ = position_embedding_type A__ = backbone A__ = use_pretrained_backbone A__ = dilation # Hungarian matcher A__ = class_cost A__ = bbox_cost A__ = giou_cost # Loss coefficients A__ = mask_loss_coefficient A__ = dice_loss_coefficient A__ = bbox_loss_coefficient A__ = giou_loss_coefficient A__ = eos_coefficient super().__init__(is_encoder_decoder=__a , **__a ) @property def _UpperCAmelCase ( self ): """simple docstring""" return self.encoder_attention_heads @property def _UpperCAmelCase ( self ): """simple docstring""" return self.d_model class snake_case_ ( _lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_: Tuple = version.parse("""1.11""" ) @property def _UpperCAmelCase ( self ): """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ] ) @property def _UpperCAmelCase ( self ): """simple docstring""" return 1E-5 @property def _UpperCAmelCase ( self ): """simple docstring""" return 12
260
0
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Falsy tombstone marking a slot whose item was deleted."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Store the item at `ind` if the slot is free or holds the same key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
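# --- usage sketch (appended for illustration; needs Python >= 3.10 for slots dataclasses) ---
if __name__ == "__main__":
    hm: HashMap[str, int] = HashMap(initial_block_size=4)
    hm["a"] = 1
    hm["b"] = 2
    hm["a"] = 3  # same key: value overwritten in place, length unchanged
    assert hm["a"] == 3 and len(hm) == 2
    del hm["b"]  # slot becomes a _deleted tombstone; the table may shrink when sparse
    assert list(hm) == ["a"] and len(hm) == 1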
266
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class a : """simple docstring""" def __init__( self : Union[str, Any] , snake_case : str , snake_case : Union[str, Any]=13 , snake_case : int=7 , snake_case : Union[str, Any]=True , snake_case : Tuple=True , snake_case : Tuple=True , snake_case : Union[str, Any]=True , snake_case : Tuple=99 , snake_case : str=32 , snake_case : Tuple=5 , snake_case : int=4 , snake_case : Union[str, Any]=37 , snake_case : Union[str, Any]="gelu" , snake_case : List[Any]=0.1 , snake_case : List[Any]=0.1 , snake_case : Any=512 , snake_case : Dict=16 , snake_case : Dict=2 , snake_case : int=0.02 , snake_case : Dict=3 , snake_case : Tuple=4 , snake_case : Any=None , ) -> Tuple: __UpperCAmelCase : Optional[Any] = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : List[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : Union[str, Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : List[str] = hidden_size __UpperCAmelCase : Any = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : str = intermediate_size __UpperCAmelCase : Tuple = hidden_act __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob __UpperCAmelCase : List[str] = max_position_embeddings __UpperCAmelCase : Optional[Any] = type_vocab_size __UpperCAmelCase : Dict = type_sequence_label_size __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : Union[str, Any] = num_labels __UpperCAmelCase : List[str] = num_choices __UpperCAmelCase : List[str] = scope def lowerCamelCase__ ( self : Optional[int] ) -> Any: __UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Optional[int] = None if self.use_input_mask: __UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Optional[int] = None if self.use_token_type_ids: __UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : int = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : Union[str, Any] ) -> int: return NystromformerConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : int , snake_case : Optional[int] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : str , snake_case : Union[str, Any] , snake_case : Tuple ) -> Dict: __UpperCAmelCase : Any = NystromformerModel(config=snake_case ) model.to(snake_case ) model.eval() __UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case ) __UpperCAmelCase : List[str] = model(snake_case , token_type_ids=snake_case ) __UpperCAmelCase : Dict = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Dict , snake_case : int , snake_case : Optional[int] , snake_case : int , snake_case : int , snake_case : List[str] , snake_case : Any , snake_case : Tuple ) -> List[Any]: __UpperCAmelCase : List[Any] = NystromformerForMaskedLM(config=snake_case ) model.to(snake_case ) model.eval() __UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : str , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Tuple ) -> Union[str, Any]: __UpperCAmelCase : List[str] = NystromformerForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() __UpperCAmelCase : List[str] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Dict , snake_case : Any , snake_case : int , snake_case : Dict , snake_case : Any , snake_case : Optional[int] , snake_case : Tuple , snake_case : Any ) -> List[str]: __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = NystromformerForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() __UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : Tuple , snake_case : List[Any] , snake_case : Optional[int] , snake_case : int , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : List[str] ) -> Optional[Any]: __UpperCAmelCase : str = self.num_labels __UpperCAmelCase : int = NystromformerForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() __UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def 
lowerCamelCase__ ( self : int , snake_case : Tuple , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[Any] , snake_case : Optional[int] , snake_case : Union[str, Any] ) -> Any: __UpperCAmelCase : List[str] = self.num_choices __UpperCAmelCase : List[str] = NystromformerForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() __UpperCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Union[str, Any] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]: __UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : int = config_and_inputs __UpperCAmelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class a ( _a , _a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : Union[str, Any] = ( { "feature-extraction": NystromformerModel, "fill-mask": NystromformerForMaskedLM, "question-answering": NystromformerForQuestionAnswering, "text-classification": NystromformerForSequenceClassification, "token-classification": NystromformerForTokenClassification, "zero-shot": NystromformerForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = False def lowerCamelCase__ ( self : Dict ) -> List[str]: __UpperCAmelCase : str = NystromformerModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def lowerCamelCase__ ( self : int ) -> Any: self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]: __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase__ ( self : Dict ) -> Tuple: __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : List[str] = type self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase__ ( self : Any ) -> List[Any]: __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]: __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case ) def lowerCamelCase__ ( self : str ) -> Optional[Any]: __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_question_answering(*snake_case ) def lowerCamelCase__ ( self : str ) -> Union[str, Any]: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def lowerCamelCase__ ( self : str ) -> int: __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) @slow def lowerCamelCase__ ( self : Any ) -> Union[str, Any]: for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[Any] = NystromformerModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @require_torch class a ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self : List[str] ) -> List[str]: __UpperCAmelCase : Union[str, Any] = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) __UpperCAmelCase : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(snake_case )[0] __UpperCAmelCase : Tuple = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , snake_case ) __UpperCAmelCase : Tuple = torch.tensor( [[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1E-4 ) ) @slow def lowerCamelCase__ ( self : Optional[int] ) -> str: __UpperCAmelCase : str = '''the [MASK] of Belgium is Brussels''' __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) __UpperCAmelCase : Tuple = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) __UpperCAmelCase : Optional[Any] = tokenizer(snake_case , return_tensors='''pt''' ) with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(encoding.input_ids ).logits __UpperCAmelCase : str = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(snake_case ) , '''capital''' )
266
1
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowercase__ : @staticmethod def UpperCAmelCase__ ( *snake_case__ : List[str] , **snake_case__ : str ): pass @is_pipeline_test @require_vision @require_timm @require_torch class lowercase__ ( unittest.TestCase ): _UpperCAmelCase :Optional[Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[Any] ): lowerCamelCase_ : Tuple =ObjectDetectionPipeline(model=snake_case__ , image_processor=snake_case__ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Any , snake_case__ : int ): lowerCamelCase_ : Union[str, Any] =object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(snake_case__ ) , 0 ) for detected_object in outputs: self.assertEqual( snake_case__ , { "score": ANY(snake_case__ ), "label": ANY(snake_case__ ), "box": {"xmin": ANY(snake_case__ ), "ymin": ANY(snake_case__ ), "xmax": ANY(snake_case__ ), "ymax": ANY(snake_case__ )}, } , ) import datasets lowerCamelCase_ : Any =datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) lowerCamelCase_ : Dict =[ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] lowerCamelCase_ : Any =object_detector(snake_case__ , threshold=0.0 ) self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) for outputs in batch_outputs: self.assertGreater(len(snake_case__ ) , 0 ) for detected_object in outputs: self.assertEqual( snake_case__ , { "score": ANY(snake_case__ ), "label": ANY(snake_case__ ), "box": {"xmin": ANY(snake_case__ ), "ymin": ANY(snake_case__ ), "xmax": ANY(snake_case__ ), "ymax": ANY(snake_case__ )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def UpperCAmelCase__ ( self : str ): pass @require_torch def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : Any ="hf-internal-testing/tiny-detr-mobilenetsv3" lowerCamelCase_ : List[Any] =AutoModelForObjectDetection.from_pretrained(snake_case__ ) lowerCamelCase_ : int =AutoFeatureExtractor.from_pretrained(snake_case__ ) lowerCamelCase_ : Union[str, Any] =ObjectDetectionPipeline(model=snake_case__ , feature_extractor=snake_case__ ) lowerCamelCase_ : Tuple =object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [ {"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) lowerCamelCase_ : Optional[Any] =object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [ [ {"score": 
0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ : Dict ="facebook/detr-resnet-50" lowerCamelCase_ : Any =AutoModelForObjectDetection.from_pretrained(snake_case__ ) lowerCamelCase_ : List[Any] =AutoFeatureExtractor.from_pretrained(snake_case__ ) lowerCamelCase_ : Union[str, Any] =ObjectDetectionPipeline(model=snake_case__ , feature_extractor=snake_case__ ) lowerCamelCase_ : Dict =object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [ {"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) lowerCamelCase_ : Optional[Any] =object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [ [ {"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : Union[str, Any] ="facebook/detr-resnet-50" lowerCamelCase_ : Any =pipeline("object-detection" , model=snake_case__ ) lowerCamelCase_ : Dict =object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [ {"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) lowerCamelCase_ : List[str] 
=object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [ [ {"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : Any =0.9_985 lowerCamelCase_ : Optional[Any] ="facebook/detr-resnet-50" lowerCamelCase_ : Optional[int] =pipeline("object-detection" , model=snake_case__ ) lowerCamelCase_ : Union[str, Any] =object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=snake_case__ ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [ {"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : List[str] ="Narsil/layoutlmv3-finetuned-funsd" lowerCamelCase_ : int =0.9_993 lowerCamelCase_ : int =pipeline("object-detection" , model=snake_case__ , threshold=snake_case__ ) lowerCamelCase_ : int =object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [ {"score": 0.9_993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9_993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
153
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase__ ( snake_case__ ): _UpperCAmelCase :Union[str, Any] = (PNDMScheduler,) _UpperCAmelCase :Tuple = (("num_inference_steps", 50),) def UpperCAmelCase__ ( self : Any , **snake_case__ : Optional[int] ): lowerCamelCase_ : Optional[int] ={ "num_train_timesteps": 1000, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**snake_case__ ) return config def UpperCAmelCase__ ( self : Any , snake_case__ : List[Any]=0 , **snake_case__ : Union[str, Any] ): lowerCamelCase_ : List[Any] =dict(self.forward_default_kwargs ) lowerCamelCase_ : int =kwargs.pop("num_inference_steps" , snake_case__ ) lowerCamelCase_ : Union[str, Any] =self.dummy_sample lowerCamelCase_ : Optional[int] =0.1 * sample lowerCamelCase_ : List[str] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCamelCase_ : Tuple =self.get_scheduler_config(**snake_case__ ) lowerCamelCase_ : Dict =scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals lowerCamelCase_ : List[str] =dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case__ ) lowerCamelCase_ : Tuple =scheduler_class.from_pretrained(snake_case__ ) new_scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals lowerCamelCase_ : Optional[int] =dummy_past_residuals[:] lowerCamelCase_ : Dict =scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCamelCase_ : List[str] =new_scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowerCamelCase_ : str =scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCamelCase_ : List[str] =new_scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase__ ( self : Union[str, Any] ): pass def UpperCAmelCase__ ( self : int , snake_case__ : int=0 , **snake_case__ : int ): lowerCamelCase_ : int =dict(self.forward_default_kwargs ) lowerCamelCase_ : int =kwargs.pop("num_inference_steps" , snake_case__ ) lowerCamelCase_ : List[Any] =self.dummy_sample lowerCamelCase_ : str =0.1 * sample lowerCamelCase_ : Union[str, Any] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCamelCase_ : Any =self.get_scheduler_config() lowerCamelCase_ : str =scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals (must be after setting timesteps) lowerCamelCase_ : Dict =dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case__ ) lowerCamelCase_ : Union[str, Any] =scheduler_class.from_pretrained(snake_case__ ) # copy over dummy past residuals new_scheduler.set_timesteps(snake_case__ ) # copy over dummy past residual (must be after setting timesteps) lowerCamelCase_ : Optional[Any] =dummy_past_residuals[:] lowerCamelCase_ : Optional[Any] =scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCamelCase_ : Optional[Any] 
=new_scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowerCamelCase_ : str =scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCamelCase_ : Union[str, Any] =new_scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase__ ( self : str , **snake_case__ : Optional[Any] ): lowerCamelCase_ : Optional[int] =self.scheduler_classes[0] lowerCamelCase_ : int =self.get_scheduler_config(**snake_case__ ) lowerCamelCase_ : Dict =scheduler_class(**snake_case__ ) lowerCamelCase_ : List[str] =10 lowerCamelCase_ : str =self.dummy_model() lowerCamelCase_ : List[str] =self.dummy_sample_deter scheduler.set_timesteps(snake_case__ ) for i, t in enumerate(scheduler.prk_timesteps ): lowerCamelCase_ : Any =model(snake_case__ , snake_case__ ) lowerCamelCase_ : int =scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): lowerCamelCase_ : Tuple =model(snake_case__ , snake_case__ ) lowerCamelCase_ : Optional[Any] =scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ ).prev_sample return sample def UpperCAmelCase__ ( self : str ): lowerCamelCase_ : Optional[Any] =dict(self.forward_default_kwargs ) lowerCamelCase_ : List[str] =kwargs.pop("num_inference_steps" , snake_case__ ) for scheduler_class in self.scheduler_classes: lowerCamelCase_ : Union[str, Any] =self.get_scheduler_config() lowerCamelCase_ : int =scheduler_class(**snake_case__ ) lowerCamelCase_ : Dict =self.dummy_sample lowerCamelCase_ : Dict =0.1 * sample if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ): scheduler.set_timesteps(snake_case__ ) elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ): lowerCamelCase_ : Dict =num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCamelCase_ : int =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowerCamelCase_ : int =dummy_past_residuals[:] lowerCamelCase_ : int =scheduler.step_prk(snake_case__ , 0 , snake_case__ , **snake_case__ ).prev_sample lowerCamelCase_ : str =scheduler.step_prk(snake_case__ , 1 , snake_case__ , **snake_case__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) lowerCamelCase_ : List[Any] =scheduler.step_plms(snake_case__ , 0 , snake_case__ , **snake_case__ ).prev_sample lowerCamelCase_ : Optional[Any] =scheduler.step_plms(snake_case__ , 1 , snake_case__ , **snake_case__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase__ ( self : Tuple ): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def UpperCAmelCase__ ( self : str ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=snake_case__ ) lowerCamelCase_ : List[Any] =self.scheduler_classes[0] lowerCamelCase_ : Union[str, Any] =self.get_scheduler_config(steps_offset=1 ) lowerCamelCase_ : Union[str, Any] =scheduler_class(**snake_case__ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] 
) , ) def UpperCAmelCase__ ( self : str ): for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def UpperCAmelCase__ ( self : str ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case__ ) def UpperCAmelCase__ ( self : Optional[Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def UpperCAmelCase__ ( self : str ): for t in [1, 5, 10]: self.check_over_forward(time_step=snake_case__ ) def UpperCAmelCase__ ( self : Any ): for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=snake_case__ ) def UpperCAmelCase__ ( self : int ): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 lowerCamelCase_ : Any =27 for scheduler_class in self.scheduler_classes: lowerCamelCase_ : Any =self.dummy_sample lowerCamelCase_ : Dict =0.1 * sample lowerCamelCase_ : Optional[Any] =self.get_scheduler_config() lowerCamelCase_ : Any =scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): lowerCamelCase_ : str =scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ ).prev_sample def UpperCAmelCase__ ( self : List[str] ): with self.assertRaises(snake_case__ ): lowerCamelCase_ : Tuple =self.scheduler_classes[0] lowerCamelCase_ : Union[str, Any] =self.get_scheduler_config() lowerCamelCase_ : List[Any] =scheduler_class(**snake_case__ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCAmelCase__ ( self : int ): lowerCamelCase_ : List[Any] =self.full_loop() lowerCamelCase_ : List[Any] =torch.sum(torch.abs(snake_case__ ) ) lowerCamelCase_ : List[str] =torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 198.1_318 ) < 1E-2 assert abs(result_mean.item() - 0.2_580 ) < 1E-3 def UpperCAmelCase__ ( self : int ): lowerCamelCase_ : List[Any] =self.full_loop(prediction_type="v_prediction" ) lowerCamelCase_ : Tuple =torch.sum(torch.abs(snake_case__ ) ) lowerCamelCase_ : Dict =torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 67.3_986 ) < 1E-2 assert abs(result_mean.item() - 0.0_878 ) < 1E-3 def UpperCAmelCase__ ( self : str ): # We specify different beta, so that the first alpha is 0.99 lowerCamelCase_ : Tuple =self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 ) lowerCamelCase_ : List[str] =torch.sum(torch.abs(snake_case__ ) ) lowerCamelCase_ : Optional[int] =torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 230.0_399 ) < 1E-2 assert abs(result_mean.item() - 0.2_995 ) < 1E-3 def UpperCAmelCase__ ( self : str ): # We specify different beta, so that the first alpha is 0.99 lowerCamelCase_ : int =self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 ) lowerCamelCase_ : Optional[Any] =torch.sum(torch.abs(snake_case__ ) ) lowerCamelCase_ : str =torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 186.9_482 ) < 1E-2 assert abs(result_mean.item() - 0.2_434 ) < 1E-3
153
1
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from one job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
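# --- example invocation (script path is hypothetical; adjust to where this file lives) ---
#   python utils/get_github_job_time.py --workflow_run_id 1234567890
# Prints one "<job name>: <duration in minutes>" line per job, longest job first.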
440
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# As in the RoBERTa init above, the scrambled source overwrote one variable per
# branch; the keyed `_import_structure` dict is restored so the `_LazyModule`
# call at the bottom resolves.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
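# --- usage sketch (not part of the original file) ---
# Assuming this file is `transformers/models/poolformer/__init__.py` in an
# installed tree, the config class is resolved lazily via _LazyModule:
#
#     from transformers.models.poolformer import PoolFormerConfig
#     config = PoolFormerConfig()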
440
1
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class _lowerCAmelCase( _a ): """simple docstring""" def __get__( self , _lowerCamelCase , _lowerCamelCase=None ): if obj is None: return self if self.fget is None: raise AttributeError('unreadable attribute' ) UpperCamelCase_: str = '__cached_' + self.fget.__name__ UpperCamelCase_: List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if cached is None: UpperCamelCase_: Dict = self.fget(lowerCAmelCase_ ) setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return cached def snake_case (UpperCAmelCase__ ) -> int: UpperCamelCase_: Optional[int] = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F'''invalid truth value {val!r}''' ) def snake_case (UpperCAmelCase__ ) -> int: if is_torch_fx_proxy(UpperCAmelCase__ ): return True if is_torch_available(): import torch if isinstance(UpperCAmelCase__ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCAmelCase__ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCAmelCase__ , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCAmelCase__ , np.ndarray ) def snake_case (UpperCAmelCase__ ) -> Any: return isinstance(UpperCAmelCase__ , np.ndarray ) def snake_case (UpperCAmelCase__ ) -> Optional[int]: return _is_numpy(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Tuple: import torch return isinstance(UpperCAmelCase__ , torch.Tensor ) def snake_case (UpperCAmelCase__ ) -> List[str]: return False if not is_torch_available() else _is_torch(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Optional[Any]: import torch return isinstance(UpperCAmelCase__ , torch.device ) def snake_case (UpperCAmelCase__ ) -> List[Any]: return False if not is_torch_available() else _is_torch_device(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Optional[Any]: import torch if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ): UpperCamelCase_: int = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) else: return False return isinstance(UpperCAmelCase__ , torch.dtype ) def snake_case (UpperCAmelCase__ ) -> Tuple: return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Union[str, Any]: import tensorflow as tf return isinstance(UpperCAmelCase__ , tf.Tensor ) def snake_case (UpperCAmelCase__ ) -> Any: return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Tuple: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCAmelCase__ , 'is_symbolic_tensor' ): return tf.is_symbolic_tensor(UpperCAmelCase__ ) return type(UpperCAmelCase__ ) == tf.Tensor def snake_case (UpperCAmelCase__ ) -> int: return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> List[str]: import jax.numpy as jnp # noqa: F811 
return isinstance(UpperCAmelCase__ , jnp.ndarray ) def snake_case (UpperCAmelCase__ ) -> Union[str, Any]: return False if not is_flax_available() else _is_jax(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Any: if isinstance(UpperCAmelCase__ , (dict, UserDict) ): return {k: to_py_obj(UpperCAmelCase__ ) for k, v in obj.items()} elif isinstance(UpperCAmelCase__ , (list, tuple) ): return [to_py_obj(UpperCAmelCase__ ) for o in obj] elif is_tf_tensor(UpperCAmelCase__ ): return obj.numpy().tolist() elif is_torch_tensor(UpperCAmelCase__ ): return obj.detach().cpu().tolist() elif is_jax_tensor(UpperCAmelCase__ ): return np.asarray(UpperCAmelCase__ ).tolist() elif isinstance(UpperCAmelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def snake_case (UpperCAmelCase__ ) -> List[Any]: if isinstance(UpperCAmelCase__ , (dict, UserDict) ): return {k: to_numpy(UpperCAmelCase__ ) for k, v in obj.items()} elif isinstance(UpperCAmelCase__ , (list, tuple) ): return np.array(UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): return obj.numpy() elif is_torch_tensor(UpperCAmelCase__ ): return obj.detach().cpu().numpy() elif is_jax_tensor(UpperCAmelCase__ ): return np.asarray(UpperCAmelCase__ ) else: return obj class _lowerCAmelCase( _a ): """simple docstring""" def _a ( self ): UpperCamelCase_: Any = fields(self ) # Safety and consistency checks if not len(lowerCAmelCase_ ): raise ValueError(f'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' ) UpperCamelCase_: Tuple = getattr(self , class_fields[0].name ) UpperCamelCase_: Optional[int] = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(lowerCAmelCase_ ): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCamelCase_: List[Any] = first_field.items() UpperCamelCase_: Dict = True else: try: UpperCamelCase_: List[Any] = iter(lowerCAmelCase_ ) UpperCamelCase_: Tuple = True except TypeError: UpperCamelCase_: int = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(lowerCAmelCase_ ): if ( not isinstance(lowerCAmelCase_ , (list, tuple) ) or not len(lowerCAmelCase_ ) == 2 or not isinstance(element[0] , lowerCAmelCase_ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCamelCase_: Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self , element[0] , element[1] ) if element[1] is not None: UpperCamelCase_: Optional[int] = element[1] elif first_field is not None: UpperCamelCase_: Optional[Any] = first_field else: for field in class_fields: UpperCamelCase_: Any = getattr(self , field.name ) if v is not None: UpperCamelCase_: Any = v def __delitem__( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def _a ( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def _a ( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def _a ( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self , _lowerCamelCase ): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCamelCase_: Optional[Any] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , _lowerCamelCase , _lowerCamelCase ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(lowerCAmelCase_ , lowerCAmelCase_ ) super().__setattr__(lowerCAmelCase_ , lowerCAmelCase_ ) def __setitem__( self , _lowerCamelCase , _lowerCamelCase ): super().__setitem__(lowerCAmelCase_ , lowerCAmelCase_ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( self ): return tuple(self[k] for k in self.keys() ) class _lowerCAmelCase( _a , _a ): """simple docstring""" @classmethod def _a ( cls , _lowerCamelCase ): raise ValueError( f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class _lowerCAmelCase( _a ): """simple docstring""" a : Tuple ='''longest''' a : Optional[Any] ='''max_length''' a : Union[str, Any] ='''do_not_pad''' class _lowerCAmelCase( _a ): """simple docstring""" a : Any ='''pt''' a : Dict ='''tf''' a : Optional[int] ='''np''' a : Dict ='''jax''' class _lowerCAmelCase: """simple docstring""" def __init__( self , _lowerCamelCase ): UpperCamelCase_: Optional[int] = context_managers UpperCamelCase_: Optional[Any] = ExitStack() def __enter__( self ): for context_manager in self.context_managers: self.stack.enter_context(lowerCAmelCase_ ) def __exit__( self , *_lowerCamelCase , **_lowerCamelCase ): self.stack.__exit__(*lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case (UpperCAmelCase__ ) -> Union[str, Any]: UpperCamelCase_: str = infer_framework(UpperCAmelCase__ ) if framework == "tf": UpperCamelCase_: Union[str, Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCamelCase_: int = inspect.signature(model_class.forward ) # PyTorch models else: UpperCamelCase_: int = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def snake_case (UpperCAmelCase__ ) -> Optional[int]: UpperCamelCase_: Optional[int] = model_class.__name__ UpperCamelCase_: Any = infer_framework(UpperCAmelCase__ ) if framework == "tf": UpperCamelCase_: List[str] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCamelCase_: Optional[Any] = 
inspect.signature(model_class.forward ) # PyTorch models else: UpperCamelCase_: Optional[Any] = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def snake_case (UpperCAmelCase__ , UpperCAmelCase__ = "" , UpperCAmelCase__ = "." ) -> Optional[Any]: def _flatten_dict(UpperCAmelCase__ , UpperCAmelCase__="" , UpperCAmelCase__="." ): for k, v in d.items(): UpperCamelCase_: Dict = str(UpperCAmelCase__ ) + delimiter + str(UpperCAmelCase__ ) if parent_key else k if v and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): yield from flatten_dict(UpperCAmelCase__ , UpperCAmelCase__ , delimiter=UpperCAmelCase__ ).items() else: yield key, v return dict(_flatten_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) ) @contextmanager def snake_case (UpperCAmelCase__ , UpperCAmelCase__ = False ) -> Union[str, Any]: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def snake_case (UpperCAmelCase__ , UpperCAmelCase__=None ) -> List[str]: if is_numpy_array(UpperCAmelCase__ ): return np.transpose(UpperCAmelCase__ , axes=UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.T if axes is None else array.permute(*UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.transpose(UpperCAmelCase__ , perm=UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return jnp.transpose(UpperCAmelCase__ , axes=UpperCAmelCase__ ) else: raise ValueError(F'''Type not supported for transpose: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]: if is_numpy_array(UpperCAmelCase__ ): return np.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.reshape(*UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return jnp.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) else: raise ValueError(F'''Type not supported for reshape: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__=None ) -> Any: if is_numpy_array(UpperCAmelCase__ ): return np.squeeze(UpperCAmelCase__ , axis=UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.squeeze(UpperCAmelCase__ , axis=UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return jnp.squeeze(UpperCAmelCase__ , axis=UpperCAmelCase__ ) else: raise ValueError(F'''Type not supported for squeeze: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: if is_numpy_array(UpperCAmelCase__ ): return np.expand_dims(UpperCAmelCase__ , UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.unsqueeze(dim=UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.expand_dims(UpperCAmelCase__ , axis=UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return jnp.expand_dims(UpperCAmelCase__ , axis=UpperCAmelCase__ ) else: raise ValueError(F'''Type not supported for expand_dims: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ ) -> Dict: if is_numpy_array(UpperCAmelCase__ ): 
return np.size(UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.numel() elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.size(UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return array.size else: raise ValueError(F'''Type not supported for expand_dims: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]: for key, value in auto_map.items(): if isinstance(UpperCAmelCase__ , (tuple, list) ): UpperCamelCase_: Optional[int] = [F'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCamelCase_: Tuple = F'''{repo_id}--{value}''' return auto_map def snake_case (UpperCAmelCase__ ) -> str: for base_class in inspect.getmro(UpperCAmelCase__ ): UpperCamelCase_: str = base_class.__module__ UpperCamelCase_: List[str] = base_class.__name__ if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('torch' ) or name == "PreTrainedModel": return "pt" elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F'''Could not infer framework from class {model_class}.''' )
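# --- Illustrative sketch (not part of the original file) ---------------------
# A self-contained re-implementation of the nested-dict flattening idea used
# by the flatten_dict helper above, assuming the same "." delimiter: keys of
# nested dicts are joined into one flat mapping.
def _flatten_sketch(d: dict, parent_key: str = "", delimiter: str = ".") -> dict:
    items = {}
    for k, v in d.items():
        key = str(parent_key) + delimiter + str(k) if parent_key else str(k)
        if v and isinstance(v, dict):
            items.update(_flatten_sketch(v, key, delimiter))
        else:
            items[key] = v
    return items

assert _flatten_sketch({"a": {"b": 1, "c": {"d": 2}}, "e": 3}) == {
    "a.b": 1,
    "a.c.d": 2,
    "e": 3,
}
# ------------------------------------------------------------------------------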
57
"""Recursive gamma function for positive integers and half-integers."""
from math import pi, sqrt


def gamma(num: float) -> float:
    """Return gamma(num), using gamma(n) = (n - 1) * gamma(n - 1) and
    gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num:  # guard: gamma(0) would raise ValueError
            print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
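# --- Illustrative sketch (not part of the original file) ---------------------
# Quick cross-check of the recursive gamma above: for positive integers
# gamma(n) == (n - 1)!, and for half-integers it reduces to sqrt(pi) via
# gamma(x + 1) == x * gamma(x).
from math import factorial, isclose

for _n in range(1, 8):
    assert isclose(gamma(_n), factorial(_n - 1))
assert isclose(gamma(2.5), 1.5 * 0.5 * sqrt(pi))
# ------------------------------------------------------------------------------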
22
0
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def UpperCAmelCase ( _snake_case ): return 1.0 / (1.0 + np.exp(-_outputs )) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = np.max(_outputs , axis=-1 , keepdims=_snake_case ) lowerCAmelCase = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_snake_case ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Optional[int] ="""sigmoid""" __a : List[Any] ="""softmax""" __a : Dict ="""none""" @add_end_docstrings( __UpperCAmelCase , r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. """ , ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Optional[Any] =False __a : Any =ClassificationFunction.NONE def __init__( self , **UpperCAmelCase_ ): super().__init__(**UpperCAmelCase_ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __snake_case ( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_="" , **UpperCAmelCase_ ): # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" lowerCAmelCase = tokenizer_kwargs lowerCAmelCase = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: lowerCAmelCase = self.model.config.return_all_scores if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k is None: lowerCAmelCase = top_k lowerCAmelCase = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , UpperCAmelCase_ , ) if return_all_scores: lowerCAmelCase = None else: lowerCAmelCase = 1 if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowerCAmelCase = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): lowerCAmelCase = super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
lowerCAmelCase = '''top_k''' not in kwargs if isinstance(args[0] , UpperCAmelCase_ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __snake_case ( self , UpperCAmelCase_ , **UpperCAmelCase_ ): lowerCAmelCase = self.framework if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): return self.tokenizer(**UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) == 1 and isinstance(inputs[0] , UpperCAmelCase_ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): return self.model(**UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=1 , UpperCAmelCase_=True ): # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowerCAmelCase = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowerCAmelCase = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: lowerCAmelCase = self.model.config.function_to_apply else: lowerCAmelCase = ClassificationFunction.NONE lowerCAmelCase = model_outputs['''logits'''][0] lowerCAmelCase = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowerCAmelCase = sigmoid(UpperCAmelCase_ ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowerCAmelCase = softmax(UpperCAmelCase_ ) elif function_to_apply == ClassificationFunction.NONE: lowerCAmelCase = outputs else: raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowerCAmelCase = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(UpperCAmelCase_ ) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase_ : x["score"] , reverse=UpperCAmelCase_ ) if top_k is not None: lowerCAmelCase = dict_scores[:top_k] return dict_scores
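# --- Illustrative sketch (not part of the original file) ---------------------
# Hedged usage example for the text-classification pipeline defined above;
# the checkpoint name is illustrative and its weights download on first use.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)
print(classifier("This library is a joy to use."))
# e.g. [{'label': 'POSITIVE', 'score': 0.999...}]
print(classifier("Not sure how I feel about this.", top_k=None))  # scores for all labels
# ------------------------------------------------------------------------------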
33
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( 
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , 
['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def 
__snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BertJapaneseTokenizer __a : Optional[int] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , **UpperCAmelCase_ ): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', 
'''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowerCAmelCase = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
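# --- Illustrative sketch (not part of the original file) ---------------------
# Hedged usage of the tokenizer exercised by the tests above; the checkpoint
# cl-tohoku/bert-base-japanese is the one the tests themselves load, and
# running this requires the MeCab dependencies (fugashi + ipadic) installed.
from transformers import BertJapaneseTokenizer

tok = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tok.tokenize("こんにちは、世界。"))
# ------------------------------------------------------------------------------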
33
1
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on reversed Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
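# --- Illustrative sketch (not part of the original file) ---------------------
# Worked example for the converters above, using the classic expression
# a+b*(c^d-e); both calls also print their intermediate tables.
assert infix_2_postfix("a+b*(c^d-e)") == "abcd^e-*+"
assert infix_2_prefix("a+b*(c^d-e)") == "+a*b-^cde"
# ------------------------------------------------------------------------------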
214
from math import ceil


def solution(n: int = 1001) -> int:
    """Sum of the numbers on both diagonals of an n x n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
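# --- Illustrative sketch (not part of the original file) ---------------------
# Sanity check for solution() above: the 5x5 spiral from the problem statement
# has diagonal sum 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101, and the
# 1001x1001 case is the published Project Euler 28 answer.
assert solution(5) == 101
assert solution(1001) == 669171001
# ------------------------------------------------------------------------------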
214
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
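# --- Illustrative sketch (not part of the original file) ---------------------
# What the lazy-import pattern above buys you: importing the package stays
# cheap, and the heavy modeling submodule is only materialized when one of
# the names registered in _import_structure is first accessed.
from transformers import ReformerConfig  # resolved lazily via _LazyModule

config = ReformerConfig()
print(config.model_type)  # "reformer"
# ------------------------------------------------------------------------------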
519
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=36 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=10_00 , ): """simple docstring""" _snake_case : List[str] = parent _snake_case : List[str] = batch_size _snake_case : Union[str, Any] = num_channels _snake_case : Tuple = image_size _snake_case : Dict = patch_size _snake_case : Any = is_training _snake_case : Union[str, Any] = use_input_mask _snake_case : Union[str, Any] = use_token_type_ids _snake_case : Optional[Any] = use_labels _snake_case : List[str] = vocab_size _snake_case : List[str] = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Dict = num_attention_heads _snake_case : int = intermediate_size _snake_case : List[Any] = hidden_act _snake_case : List[Any] = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : Tuple = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : Any = type_sequence_label_size _snake_case : int = initializer_range _snake_case : str = coordinate_size _snake_case : List[str] = shape_size _snake_case : List[Any] = num_labels _snake_case : Any = num_choices _snake_case : Optional[Any] = scope _snake_case : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _snake_case : List[str] = text_seq_length _snake_case : Any = (image_size // patch_size) ** 2 + 1 _snake_case : Optional[int] = self.text_seq_length + self.image_seq_length def __lowerCamelCase( self ): """simple docstring""" _snake_case : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _snake_case : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , 
self.range_bbox ) _snake_case : Optional[Any] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _snake_case : str = bbox[i, j, 3] _snake_case : List[Any] = bbox[i, j, 1] _snake_case : Optional[Any] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: _snake_case : str = bbox[i, j, 2] _snake_case : Optional[int] = bbox[i, j, 0] _snake_case : Tuple = tmp_coordinate _snake_case : Tuple = tf.constant(SCREAMING_SNAKE_CASE__ ) _snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Union[str, Any] = None if self.use_input_mask: _snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) _snake_case : Optional[int] = None if self.use_token_type_ids: _snake_case : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _snake_case : Optional[int] = None _snake_case : List[Any] = None if self.use_labels: _snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _snake_case : List[Any] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _snake_case : Dict = TFLayoutLMvaModel(config=SCREAMING_SNAKE_CASE__ ) # text + image _snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ ) _snake_case : Any = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , ) _snake_case : Dict = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only _snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): 
"""simple docstring""" _snake_case : str = self.num_labels _snake_case : str = TFLayoutLMvaForSequenceClassification(config=SCREAMING_SNAKE_CASE__ ) _snake_case : Dict = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _snake_case : Dict = self.num_labels _snake_case : Optional[Any] = TFLayoutLMvaForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) _snake_case : Any = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _snake_case : List[str] = 2 _snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) _snake_case : str = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase( self ): """simple docstring""" _snake_case : Tuple = self.prepare_config_and_inputs() ((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) : List[Any] = config_and_inputs _snake_case : int = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE_ = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" return True def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ): """simple docstring""" _snake_case : Dict = copy.deepcopy(SCREAMING_SNAKE_CASE__ ) if model_class in get_values(SCREAMING_SNAKE_CASE__ ): _snake_case : 
List[Any] = { k: tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(SCREAMING_SNAKE_CASE__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE__ ): _snake_case : int = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(SCREAMING_SNAKE_CASE__ ): _snake_case : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) _snake_case : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(SCREAMING_SNAKE_CASE__ ): _snake_case : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(SCREAMING_SNAKE_CASE__ ): _snake_case : Dict = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __lowerCamelCase( self ): """simple docstring""" _snake_case : List[str] = TFLayoutLMvaModelTester(self ) _snake_case : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def __lowerCamelCase( self ): """simple docstring""" self.config_tester.run_common_tests() def __lowerCamelCase( self ): """simple docstring""" _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[Any] = model_class(SCREAMING_SNAKE_CASE__ ) if getattr(SCREAMING_SNAKE_CASE__ , """hf_compute_loss""" , SCREAMING_SNAKE_CASE__ ): # The number of elements in the loss should be the same as the number of elements in the label _snake_case : int = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) _snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=SCREAMING_SNAKE_CASE__ )[0] ] _snake_case : Union[str, Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs _snake_case : str = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) _snake_case : int = prepared_for_class.pop("""input_ids""" ) _snake_case : Tuple = model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions _snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) _snake_case : List[str] = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: _snake_case : Union[str, Any] = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: _snake_case : Dict = -1_00 _snake_case : List[Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) _snake_case : int = model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict _snake_case : str = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) _snake_case : str = model(SCREAMING_SNAKE_CASE__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a 
tuple _snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) # Get keys that were added with the _prepare_for_class function _snake_case : str = prepared_for_class.keys() - inputs_dict.keys() _snake_case : List[Any] = inspect.signature(model.call ).parameters _snake_case : List[Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple _snake_case : str = {0: """input_ids"""} for label_key in label_keys: _snake_case : Tuple = signature_names.index(SCREAMING_SNAKE_CASE__ ) _snake_case : Dict = label_key _snake_case : Tuple = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple _snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: _snake_case : Tuple = prepared_for_class[value] _snake_case : Union[str, Any] = tuple(SCREAMING_SNAKE_CASE__ ) # Send to model _snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __lowerCamelCase( self ): """simple docstring""" ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase( self ): """simple docstring""" ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _snake_case : Optional[Any] = type self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase( self ): """simple docstring""" ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase( self ): """simple docstring""" ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase( self ): """simple docstring""" ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def __lowerCamelCase( self ): """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = TFLayoutLMvaModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase ( ) -> Any: _snake_case : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __lowerCamelCase( self ): """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE__ ) if is_vision_available() else None @slow def __lowerCamelCase( self ): """simple docstring""" _snake_case : Optional[int] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) _snake_case : Optional[Any] = self.default_image_processor _snake_case : Tuple = prepare_img() _snake_case : str = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""tf""" ).pixel_values _snake_case : Dict = tf.constant([[1, 2]] ) _snake_case : List[str] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass _snake_case : Any = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ ) # verify the logits _snake_case : str = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ ) _snake_case : Optional[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
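# --- Illustrative sketch (not part of the original file) ---------------------
# Hedged text-only inference with the model the slow test above exercises.
# Note the upstream class is spelled TFLayoutLMv3Model (the "va" spelling in
# this dump stands in for "v3"); weights download on first use.
import tensorflow as tf
from transformers import TFLayoutLMv3Model

model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
input_ids = tf.constant([[1, 2]])
bbox = tf.constant([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one bounding box per token
outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # (1, seq_len, 768)
# ------------------------------------------------------------------------------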
519
1
from __future__ import annotations from typing import Any class UpperCamelCase_ : def __init__( self :List[str] , __A :int , __A :int , __A :float = 0 ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = row, column SCREAMING_SNAKE_CASE__ = [[default_value for c in range(__A )] for r in range(__A )] def __str__( self :Optional[int] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = f'''Matrix consist of {self.row} rows and {self.column} columns\n''' # Make string identifier SCREAMING_SNAKE_CASE__ = 0 for row_vector in self.array: for obj in row_vector: SCREAMING_SNAKE_CASE__ = max(__A , len(str(__A ) ) ) SCREAMING_SNAKE_CASE__ = f'''%{max_element_length}s''' # Make string and return def single_line(__A :list[float] ) -> str: nonlocal string_format_identifier SCREAMING_SNAKE_CASE__ = """[""" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(__A ) for row_vector in self.array ) return s def __repr__( self :Optional[Any] ) -> str: """simple docstring""" return str(self ) def _snake_case ( self :Tuple , __A :tuple[int, int] ) -> bool: """simple docstring""" if not (isinstance(__A , (list, tuple) ) and len(__A ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self :List[Any] , __A :tuple[int, int] ) -> Any: """simple docstring""" assert self.validate_indicies(__A ) return self.array[loc[0]][loc[1]] def __setitem__( self :int , __A :tuple[int, int] , __A :float ) -> None: """simple docstring""" assert self.validate_indicies(__A ) SCREAMING_SNAKE_CASE__ = value def __add__( self :Any , __A :Matrix ) -> Matrix: """simple docstring""" assert isinstance(__A , __A ) assert self.row == another.row and self.column == another.column # Add SCREAMING_SNAKE_CASE__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): SCREAMING_SNAKE_CASE__ = self[r, c] + another[r, c] return result def __neg__( self :Dict ) -> Matrix: """simple docstring""" SCREAMING_SNAKE_CASE__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): SCREAMING_SNAKE_CASE__ = -self[r, c] return result def __sub__( self :Union[str, Any] , __A :Matrix ) -> Matrix: """simple docstring""" return self + (-another) def __mul__( self :Tuple , __A :int | float | Matrix ) -> Matrix: """simple docstring""" if isinstance(__A , (int, float) ): # Scalar multiplication SCREAMING_SNAKE_CASE__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): SCREAMING_SNAKE_CASE__ = self[r, c] * another return result elif isinstance(__A , __A ): # Matrix multiplication assert self.column == another.row SCREAMING_SNAKE_CASE__ = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: SCREAMING_SNAKE_CASE__ = f'''Unsupported type given for another ({type(__A )})''' raise TypeError(__A ) def _snake_case ( self :int ) -> Matrix: """simple docstring""" SCREAMING_SNAKE_CASE__ = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): SCREAMING_SNAKE_CASE__ = self[r, c] return result def _snake_case ( self :List[str] , __A :Matrix , __A :Matrix ) -> Any: """simple docstring""" assert isinstance(__A , __A ) and isinstance(__A , __A ) assert self.row == self.column == u.row == v.row # u, v should be column vector 
assert u.column == v.column == 1 # u, v should have exactly one column # Calculate v_t = v.transpose() numerator_factor = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertible return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def test1() -> None: # a^(-1) ainv = Matrix(3 , 3 , 0 ) for i in range(3 ): ainv[i, i] = 1 print(f'''a^(-1) is {ainv}''' ) # u, v u = Matrix(3 , 1 , 0 ) u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3 v = Matrix(3 , 1 , 0 ) v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5 print(f'''u is {u}''' ) print(f'''v is {v}''' ) print(f'''uv^T is {u * v.transpose()}''' ) # Sherman Morrison print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' ) def test2() -> None: import doctest doctest.testmod() test1()
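# For reference, the identity the sherman_morrison method above implements
# (a restatement of the math, not new behavior):
#
#   (A + u v^T)^{-1} = A^{-1} - (A^{-1} u v^T A^{-1}) / (1 + v^T A^{-1} u)
#
# The method is called on ainv (= A^{-1}), so only matrix products are needed
# and no fresh inversion is performed; it returns None exactly when the
# denominator 1 + v^T A^{-1} u is zero, i.e. when A + u v^T is singular.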
6
def SCREAMING_SNAKE_CASE__ ( arr: list[int] ): # minimum difference between the sums of the two subsets in a partition of arr n = len(arr ) s = sum(arr ) dp = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): dp[i][0] = True for i in range(1 , s + 1 ): dp[0][i] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): dp[i][j] = dp[i - 1][j] if arr[i - 1] <= j: dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: diff = s - 2 * j break return diff
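# A quick usage sketch for the partition function above; the anonymized name
# SCREAMING_SNAKE_CASE__ stands in for something like find_min. It returns the
# smallest achievable difference between the sums of the two halves.
if __name__ == "__main__":
    # [1, 6, 11, 5] splits into {1, 5, 6} and {11}: |12 - 11| = 1
    print(SCREAMING_SNAKE_CASE__([1, 6, 11, 5]))  # 1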
6
1
'''simple docstring''' import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) lowerCAmelCase__ = { 'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in', 'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0', 'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out', 'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1', 'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm', 'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2', 'mask_downscaling.0': 'mask_embed.conv1', 'mask_downscaling.1': 'mask_embed.layer_norm1', 'mask_downscaling.3': 'mask_embed.conv2', 'mask_downscaling.4': 'mask_embed.layer_norm2', 'mask_downscaling.6': 'mask_embed.conv3', 'point_embeddings': 'point_embed', 'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding', 'image_encoder': 'vision_encoder', 'neck.0': 'neck.conv1', 'neck.1': 'neck.layer_norm1', 'neck.2': 'neck.conv2', 'neck.3': 'neck.layer_norm2', 'patch_embed.proj': 'patch_embed.projection', '.norm': '.layer_norm', 'blocks': 'layers', } def __UpperCAmelCase ( lowerCamelCase_) -> int: UpperCamelCase__ : Union[str, Any] = {} state_dict.pop('pixel_mean' , lowerCamelCase_) state_dict.pop('pixel_std' , lowerCamelCase_) UpperCamelCase__ : str = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCamelCase__ : int = key.replace(lowerCamelCase_ , lowerCamelCase_) if re.match(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase__ : int = int(re.match(lowerCamelCase_ , lowerCamelCase_).group(2)) if layer_nb == 0: UpperCamelCase__ : Optional[Any] = key.replace('layers.0' , 'proj_in') elif layer_nb == 1: UpperCamelCase__ : List[str] = key.replace('layers.1' , 'layers.0') elif layer_nb == 2: UpperCamelCase__ : List[Any] = key.replace('layers.2' , 'proj_out') UpperCamelCase__ : Any = value UpperCamelCase__ : int = model_state_dict[ 'prompt_encoder.shared_embedding.positional_embedding' ] return model_state_dict def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="ybelkada/segment-anything") -> Any: UpperCamelCase__ : Dict = hf_hub_download(lowerCamelCase_ , f'checkpoints/{model_name}.pth') if "sam_vit_b" in model_name: UpperCamelCase__ : Tuple = SamConfig() elif "sam_vit_l" in model_name: UpperCamelCase__ : Union[str, Any] = SamVisionConfig( hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) UpperCamelCase__ : Tuple = SamConfig( vision_config=lowerCamelCase_ , ) elif "sam_vit_h" in model_name: UpperCamelCase__ : Optional[Any] = SamVisionConfig( hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) UpperCamelCase__ : str = SamConfig( vision_config=lowerCamelCase_ , ) UpperCamelCase__ : Optional[int] = torch.load(lowerCamelCase_ , map_location='cpu') UpperCamelCase__ : Union[str, Any] = replace_keys(lowerCamelCase_) UpperCamelCase__ : int = SamImageProcessor() UpperCamelCase__ : Dict = SamProcessor(image_processor=lowerCamelCase_) UpperCamelCase__ : Union[str, Any] = SamModel(lowerCamelCase_) hf_model.load_state_dict(lowerCamelCase_) UpperCamelCase__ : Dict = hf_model.to('cuda') UpperCamelCase__ : Dict = 
'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png' UpperCamelCase__ : Any = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw).convert('RGB') UpperCamelCase__ : Dict = [[[400, 650]]] UpperCamelCase__ : List[str] = [[1]] UpperCamelCase__ : Any = processor(images=np.array(lowerCamelCase_) , return_tensors='pt').to('cuda') with torch.no_grad(): UpperCamelCase__ : Tuple = hf_model(**lowerCamelCase_) UpperCamelCase__ : str = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_890_251_159_668 UpperCamelCase__ : Union[str, Any] = processor( images=np.array(lowerCamelCase_) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors='pt').to('cuda') with torch.no_grad(): UpperCamelCase__ : List[str] = hf_model(**lowerCamelCase_) UpperCamelCase__ : List[Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_712_603_092_193_604 UpperCamelCase__ : Tuple = ((75, 275, 1_725, 850),) UpperCamelCase__ : str = processor(images=np.array(lowerCamelCase_) , input_boxes=lowerCamelCase_ , return_tensors='pt').to('cuda') with torch.no_grad(): UpperCamelCase__ : Tuple = hf_model(**lowerCamelCase_) UpperCamelCase__ : Union[str, Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_686_015_605_926_514 # Test with 2 points and 1 image. UpperCamelCase__ : str = [[[400, 650], [800, 650]]] UpperCamelCase__ : Optional[int] = [[1, 1]] UpperCamelCase__ : Any = processor( images=np.array(lowerCamelCase_) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors='pt').to('cuda') with torch.no_grad(): UpperCamelCase__ : List[str] = hf_model(**lowerCamelCase_) UpperCamelCase__ : Optional[int] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_936_047_792_434_692 if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() lowerCAmelCase__ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) lowerCAmelCase__ = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
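# Hypothetical command line for the SAM conversion script above. The flag names
# and model choices come from the argparse block in this file; the script file
# name is an assumption, and a CUDA device is required by the hard-coded
# .to('cuda') calls.
#
#   python convert_sam_to_hf.py \
#       --model_name sam_vit_h_4b8939 \
#       --pytorch_dump_folder_path ./sam-vit-huge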
6
'''simple docstring''' from __future__ import annotations class __lowercase : def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]): UpperCamelCase__ : int = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.') if len(UpperCAmelCase_) != 0: UpperCamelCase__ : str = len(rows[0]) if cols == 0: raise error for row in rows: if len(UpperCAmelCase_) != cols: raise error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise error UpperCamelCase__ : Optional[int] = rows else: UpperCamelCase__ : Optional[Any] = [] def __UpperCamelCase ( self : Union[str, Any]): return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] @property def __UpperCamelCase ( self : Dict): return len(self.rows) @property def __UpperCamelCase ( self : Tuple): return len(self.rows[0]) @property def __UpperCamelCase ( self : List[Any]): return (self.num_rows, self.num_columns) @property def __UpperCamelCase ( self : Any): return self.order[0] == self.order[1] def __UpperCamelCase ( self : Any): UpperCamelCase__ : Optional[int] = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] for row_num in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : Dict): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0]) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns)) def __UpperCamelCase ( self : str): return bool(self.determinant()) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int): UpperCamelCase__ : Optional[Any] = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns) if other_column != column ] for other_row in range(self.num_rows) if other_row != row ] return Matrix(UpperCAmelCase_).determinant() def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int): if (row + column) % 2 == 0: return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : List[Any]): return Matrix( [ [self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)] for row in range(self.num_rows) ]) def __UpperCamelCase ( self : Optional[int]): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns) ] for row in range(self.minors().num_rows) ]) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Dict = [ [self.cofactors().rows[column][row] for column in range(self.num_columns)] for row in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse') return self.adjugate() * (1 / determinant) def __repr__( self : Any): return str(self.rows) def __str__( self : List[Any]): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0])) + "]]" return ( "[" + "\n ".join( [ '[' + '. 
'.join([str(UpperCAmelCase_) for value in row]) + '.]' for row in self.rows ]) + "]" ) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix') if position is None: self.rows.append(UpperCAmelCase_) else: UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:] def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : int = TypeError( 'Column must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in column: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix') if position is None: UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)] else: UpperCamelCase__ : str = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows) ] def __eq__( self : List[Any] , UpperCAmelCase_ : object): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): return NotImplemented return self.rows == other.rows def __ne__( self : Any , UpperCAmelCase_ : object): return not self == other def __neg__( self : Union[str, Any]): return self * -1 def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Addition requires matrices of the same order') return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __sub__( self : Tuple , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order') return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float): if isinstance(UpperCAmelCase_ , (int, float)): return Matrix( [[int(element * other) for element in row] for row in self.rows]) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second') return Matrix( [ [Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()] for row in self.rows ]) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix') def __pow__( self : Dict , UpperCAmelCase_ : int): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise TypeError('A Matrix can only be raised to the power of an int') if not self.is_square: raise ValueError('Only square matrices can be raised to a power') if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power') UpperCamelCase__ : str = self for _ in range(other - 1): result *= self return result @classmethod def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , 
UpperCAmelCase_ : list[int]): return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_))) if __name__ == "__main__": import doctest doctest.testmod()
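# A usage sketch for the Matrix class above. The dump collapses every method and
# property name to __UpperCamelCase; the call sites inside the class body
# (determinant, minors, cofactors, adjugate, inverse, is_invertable, identity,
# columns, dot_product, and the order/num_rows/num_columns properties) reveal
# the intended names, and this sketch assumes those originals.
m = Matrix([[1, 2], [3, 4]])
print(m.order)          # (2, 2)
print(m.determinant())  # -2
print(m ** 2)           # matrix power via repeated multiplication
print(m + Matrix([[4, 3], [2, 1]]))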
6
1
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def lowercase__( __UpperCamelCase: Any ): """simple docstring""" print('Loading config file...' ) def flatten_yaml_as_dict(__UpperCamelCase: List[Any] ,__UpperCamelCase: Optional[Any]="" ,__UpperCamelCase: List[str]="." ): SCREAMING_SNAKE_CASE : List[Any] = [] for k, v in d.items(): SCREAMING_SNAKE_CASE : List[Any] = parent_key + sep + k if parent_key else k if isinstance(__UpperCamelCase ,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase ,sep=__UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Tuple = argparse.Namespace() with open(__UpperCamelCase ,'r' ) as yaml_file: try: SCREAMING_SNAKE_CASE : Dict = yaml.load(__UpperCamelCase ,Loader=yaml.FullLoader ) SCREAMING_SNAKE_CASE : Any = flatten_yaml_as_dict(__UpperCamelCase ) for k, v in flat_cfg.items(): setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(__UpperCamelCase ,str(__UpperCamelCase ) ) ) return config def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = MobileViTVaConfig() SCREAMING_SNAKE_CASE : Any = False # dataset if task_name.startswith('imagenet1k_' ): SCREAMING_SNAKE_CASE : Dict = 10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: SCREAMING_SNAKE_CASE : Optional[Any] = 3_84 else: SCREAMING_SNAKE_CASE : List[Any] = 2_56 SCREAMING_SNAKE_CASE : Optional[Any] = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): SCREAMING_SNAKE_CASE : Optional[int] = 2_10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: SCREAMING_SNAKE_CASE : List[Any] = 3_84 else: SCREAMING_SNAKE_CASE : Optional[Any] = 2_56 SCREAMING_SNAKE_CASE : Dict = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): SCREAMING_SNAKE_CASE : Optional[Any] = 1_51 SCREAMING_SNAKE_CASE : Optional[int] = 5_12 SCREAMING_SNAKE_CASE : Dict = 'ade20k-id2label.json' SCREAMING_SNAKE_CASE : Union[str, Any] = True elif task_name.startswith('voc_' ): SCREAMING_SNAKE_CASE : Tuple = 21 SCREAMING_SNAKE_CASE : List[Any] = 5_12 SCREAMING_SNAKE_CASE : Optional[Any] = 'pascal-voc-id2label.json' SCREAMING_SNAKE_CASE : Dict = True # orig_config SCREAMING_SNAKE_CASE : Optional[Any] = load_orig_config_file(__UpperCamelCase ) assert getattr(__UpperCamelCase ,'model.classification.name' ,-1 ) == "mobilevit_v2", "Invalid model" SCREAMING_SNAKE_CASE : Tuple = getattr(__UpperCamelCase ,'model.classification.mitv2.width_multiplier' ,1.0 ) assert ( getattr(__UpperCamelCase ,'model.classification.mitv2.attn_norm_layer' ,-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" SCREAMING_SNAKE_CASE : int = getattr(__UpperCamelCase ,'model.classification.activation.name' ,'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(__UpperCamelCase 
,'model.segmentation.output_stride' ,16 ) if "_deeplabv3" in task_name: SCREAMING_SNAKE_CASE : str = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_rates' ,[12, 24, 36] ) SCREAMING_SNAKE_CASE : Tuple = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_out_channels' ,5_12 ) SCREAMING_SNAKE_CASE : int = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_dropout' ,0.1 ) # id2label SCREAMING_SNAKE_CASE : List[str] = 'huggingface/label-files' SCREAMING_SNAKE_CASE : Dict = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='dataset' ) ,'r' ) ) SCREAMING_SNAKE_CASE : List[Any] = {int(__UpperCamelCase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in idalabel.items()} return config def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = dct.pop(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Any = val def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: List[str]=False ): """simple docstring""" if base_model: SCREAMING_SNAKE_CASE : Union[str, Any] = '' else: SCREAMING_SNAKE_CASE : List[str] = 'mobilevitv2.' SCREAMING_SNAKE_CASE : Optional[Any] = [] for k in state_dict.keys(): if k[:8] == "encoder.": SCREAMING_SNAKE_CASE : int = k[8:] else: SCREAMING_SNAKE_CASE : Optional[int] = k if ".block." in k: SCREAMING_SNAKE_CASE : str = k_new.replace('.block.' ,'.' ) if ".conv." in k: SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace('.conv.' ,'.convolution.' ) if ".norm." in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace('.norm.' ,'.normalization.' ) if "conv_1." in k: SCREAMING_SNAKE_CASE : Any = k_new.replace('conv_1.' ,f"{model_prefix}conv_stem." ) for i in [1, 2]: if f"layer_{i}." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}." ,f"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace('.exp_1x1.' ,'.expand_1x1.' ) if ".red_1x1." in k: SCREAMING_SNAKE_CASE : Tuple = k_new.replace('.red_1x1.' ,'.reduce_1x1.' ) for i in [3, 4, 5]: if f"layer_{i}.0." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}.0." ,f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if f"layer_{i}.1.local_rep.0." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}.1.local_rep.0." ,f"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if f"layer_{i}.1.local_rep.1." in k: SCREAMING_SNAKE_CASE : Dict = k_new.replace(f"layer_{i}.1.local_rep.1." ,f"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: SCREAMING_SNAKE_CASE : int = [0, 1] elif i == 4: SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 3] elif i == 5: SCREAMING_SNAKE_CASE : List[str] = [0, 1, 2] for j in j_in: if f"layer_{i}.1.global_rep.{j}." in k: SCREAMING_SNAKE_CASE : Tuple = k_new.replace( f"layer_{i}.1.global_rep.{j}." ,f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if f"layer_{i}.1.global_rep.{j+1}." in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace( f"layer_{i}.1.global_rep.{j+1}." ,f"{model_prefix}encoder.layer.{i-1}.layernorm." ) if f"layer_{i}.1.conv_proj." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f"layer_{i}.1.conv_proj." ,f"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace('pre_norm_attn.0.' ,'layernorm_before.' ) if "pre_norm_attn.1." 
in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace('pre_norm_attn.1.' ,'attention.' ) if "pre_norm_ffn.0." in k: SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace('pre_norm_ffn.0.' ,'layernorm_after.' ) if "pre_norm_ffn.1." in k: SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace('pre_norm_ffn.1.' ,'ffn.conv1.' ) if "pre_norm_ffn.3." in k: SCREAMING_SNAKE_CASE : int = k_new.replace('pre_norm_ffn.3.' ,'ffn.conv2.' ) if "classifier.1." in k: SCREAMING_SNAKE_CASE : int = k_new.replace('classifier.1.' ,'classifier.' ) if "seg_head." in k: SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace('seg_head.' ,'segmentation_head.' ) if ".aspp_layer." in k: SCREAMING_SNAKE_CASE : Any = k_new.replace('.aspp_layer.' ,'.' ) if ".aspp_pool." in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace('.aspp_pool.' ,'.' ) rename_keys.append((k, k_new) ) return rename_keys def lowercase__( __UpperCamelCase: Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' ): keys_to_ignore.append(__UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Optional[Any] ,__UpperCamelCase: Optional[Any] ,__UpperCamelCase: List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = get_mobilevitva_config(__UpperCamelCase ,__UpperCamelCase ) # load original state_dict SCREAMING_SNAKE_CASE : Dict = torch.load(__UpperCamelCase ,map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval() SCREAMING_SNAKE_CASE : List[str] = False else: SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTVaForImageClassification(__UpperCamelCase ).eval() SCREAMING_SNAKE_CASE : Optional[Any] = False # remove and rename some keys of load the original model SCREAMING_SNAKE_CASE : Dict = checkpoint remove_unused_keys(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Tuple = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # load modified state_dict model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 ) SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(images=prepare_img() ,return_tensors='pt' ) SCREAMING_SNAKE_CASE : Tuple = model(**__UpperCamelCase ) # verify classification model if task_name.startswith('imagenet' ): SCREAMING_SNAKE_CASE : Any = outputs.logits SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(-1 ).item() print('Predicted class:' ,model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant SCREAMING_SNAKE_CASE : Any = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ) assert torch.allclose(logits[0, :3] ,__UpperCamelCase 
,atol=1e-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__UpperCamelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) UpperCamelCase_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
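# Hypothetical command line for the MobileViTV2 conversion script above; the
# flag names and task choices are taken from the argparse block, while the
# script file name and the local checkpoint/config paths are assumptions.
#
#   python convert_mobilevitv2_to_hf.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256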
28
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Union[str, Any]: if attention_mask is None: a = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: a = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: a = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__UpperCamelCase) if decoder_head_mask is None: a = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCamelCase) if cross_attn_head_mask is None: a = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCamelCase) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class a__ : def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=16 , A=2 , A=4 , A=4 , A="relu" , A=0.1 , A=0.1 , A=0.0 , A=0.0 , A=20 , A=2 , A=1 , A=0 , ) -> Any: '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = encoder_layerdrop a = decoder_layerdrop a = max_position_embeddings a = eos_token_id a = pad_token_id a = bos_token_id def lowerCAmelCase_ ( self ) -> Any: '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = self.eos_token_id # Eos Token a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input a = input_ids.clamp(self.pad_token_id + 1 ) a = decoder_input_ids.clamp(self.pad_token_id + 1 ) a = self.get_config() a = prepare_mam_aaa_inputs_dict(A , A , A ) return config, inputs_dict def lowerCAmelCase_ ( self ) -> Any: '''simple docstring''' return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , 
encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' a , a = self.prepare_config_and_inputs() return config, inputs_dict def lowerCAmelCase_ ( self , A , A ) -> Optional[int]: '''simple docstring''' a = MaMaaaModel(config=A ).get_decoder().to(A ).eval() a = inputs_dict["input_ids"] a = inputs_dict["attention_mask"] a = inputs_dict["head_mask"] # first forward pass a = model(A , attention_mask=A , head_mask=A , use_cache=A ) a , a = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids a = ids_tensor((self.batch_size, 3) , config.vocab_size ) a = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and a = torch.cat([input_ids, next_tokens] , dim=-1 ) a = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) a = model(A , attention_mask=A )["last_hidden_state"] a = model(A , attention_mask=A , past_key_values=A )[ "last_hidden_state" ] # select random slice a = ids_tensor((1,) , output_from_past.shape[-1] ).item() a = output_from_no_past[:, -3:, random_slice_idx].detach() a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-2 ) ) def lowerCAmelCase_ ( self , A , A ) -> Union[str, Any]: '''simple docstring''' a = MaMaaaModel(config=A ).to(A ).eval() a = model(**A ) a = outputs.encoder_last_hidden_state a = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: a = model.get_encoder() encoder.save_pretrained(A ) a = MaMaaaEncoder.from_pretrained(A ).to(A ) a = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: a = model.get_decoder() decoder.save_pretrained(A ) a = MaMaaaDecoder.from_pretrained(A ).to(A ) a = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class a__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a : Union[str, Any] = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) a : Dict = (MaMaaaForConditionalGeneration,) if is_torch_available() else () a : Union[str, Any] = ( { """conversational""": MaMaaaForConditionalGeneration, """feature-extraction""": MaMaaaModel, """summarization""": MaMaaaForConditionalGeneration, """text2text-generation""": MaMaaaForConditionalGeneration, """translation""": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) a : Optional[int] = True a : int = True a : Tuple = False a : Dict = False def lowerCAmelCase_ ( self , A , A , A , A , A ) -> Optional[int]: '''simple docstring''' if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation 
requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def lowerCAmelCase_ ( self ) -> str: '''simple docstring''' a = MaMaaaModelTester(self ) a = ConfigTester(self , config_class=A ) def lowerCAmelCase_ ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase_ ( self ) -> int: '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: a = model_class(A ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A ) a , a = model_class.from_pretrained(A , output_loading_info=A ) self.assertEqual(info["missing_keys"] , [] ) def lowerCAmelCase_ ( self ) -> List[str]: '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*A ) def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*A ) def lowerCAmelCase_ ( self ) -> Dict: '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): a = model_class(A ) model.to(A ) model.eval() a = copy.deepcopy(self._prepare_for_class(A , A ) ) if not self.is_encoder_decoder: a = inputs["input_ids"] del inputs["input_ids"] else: a = inputs["input_ids"] a = inputs.get("decoder_input_ids" , A ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , A ) a = model.get_input_embeddings() if not self.is_encoder_decoder: a = wte(A ) else: a = wte(A ) a = wte(A ) with torch.no_grad(): model(**A )[0] def lowerCAmelCase_ ( self ) -> Dict: '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs() a = input_dict["input_ids"] a = input_ids.ne(1 ).to(A ) a = MaMaaaForConditionalGeneration(A ).eval().to(A ) if torch_device == "cuda": model.half() model.generate(A , attention_mask=A ) model.generate(num_beams=4 , do_sample=A , early_stopping=A , num_return_sequences=3 ) def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> Dict: return torch.tensor(__UpperCamelCase , dtype=torch.long , device=__UpperCamelCase) lowercase__ : Optional[int] = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class a__ ( unittest.TestCase ): @cached_property def lowerCAmelCase_ ( self ) -> Optional[int]: '''simple docstring''' return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def lowerCAmelCase_ ( self ) -> Optional[int]: '''simple docstring''' a = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(A ) a = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] ) a = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) a = prepare_mam_aaa_inputs_dict(model.config , A , A ) with torch.no_grad(): a = model(**A )[0] a = torch.Size((1, 11, 1024) ) self.assertEqual(output.shape , A ) # change to expected output here a = torch.tensor( [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=A ) self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=A ) ) def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' a = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(A ) # change to intended input a = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 
39144, 38, 2]] ) a = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) a = prepare_mam_aaa_inputs_dict(model.config , A , A ) with torch.no_grad(): a = model(**A )[0] a = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , A ) # change to expected output here a = torch.tensor( [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=A ) self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=A ) ) def lowerCAmelCase_ ( self ) -> str: '''simple docstring''' a = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(A ) a = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) a = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams a = tokenizer(A , padding=A , return_tensors="pt" ) a = model.generate( input_ids=dct["input_ids"].to(A ) , attention_mask=dct["attention_mask"].to(A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) a = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] a = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=A , skip_special_tokens=A ) assert generated == expected_en
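# A minimal translation sketch using the public transformers names that the
# MaMaaa* aliases in this test file map onto (M2M100, with digits anonymized);
# assumes Hub access to facebook/m2m100_418M.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
batch = tokenizer("La vie est belle.", return_tensors="pt")
generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))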
515
0
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int lowerCamelCase__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _lowerCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' snake_case_ = None def _SCREAMING_SNAKE_CASE( snake_case_ : "pyspark.sql.DataFrame" , snake_case_ : List[int] , ) ->List[Any]: '''simple docstring''' import pyspark def generate_fn(): _lowercase : int = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: _lowercase : List[str] = df_with_partition_id.select('''*''' ).where(F"part_id = {partition_id}" ).drop('''part_id''' ) _lowercase : Union[str, Any] = partition_df.collect() _lowercase : List[Any] = 0 for row in rows: yield F"{partition_id}_{row_id}", row.asDict() row_id += 1 return generate_fn class _lowerCAmelCase ( _BaseExamplesIterable ): '''simple docstring''' def __init__( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]=None , ) -> List[Any]: '''simple docstring''' _lowercase : List[Any] = df _lowercase : Any = partition_order or range(self.df.rdd.getNumPartitions() ) _lowercase : List[Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : List[Any] ) -> Tuple: '''simple docstring''' yield from self.generate_examples_fn() def __lowercase ( self : Any , UpperCamelCase_ : Optional[Any] ) -> Dict: '''simple docstring''' _lowercase : Any = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_lowerCAmelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCAmelCase ) def __lowercase ( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _lowercase : Optional[Any] = self.split_shard_indices_by_worker(_lowerCAmelCase , _lowerCAmelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCAmelCase ) @property def __lowercase ( self : Any ) -> str: '''simple docstring''' return len(self.partition_order ) class _lowerCAmelCase ( datasets.DatasetBuilder ): '''simple docstring''' snake_case_ = SparkConfig def __init__( self : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple = None , UpperCamelCase_ : str = None , **UpperCamelCase_ : Optional[int] , ) -> int: '''simple docstring''' import pyspark _lowercase : Dict = pyspark.sql.SparkSession.builder.getOrCreate() _lowercase : List[Any] = df _lowercase : List[str] = working_dir super().__init__( cache_dir=_lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **_lowerCAmelCase , ) def __lowercase ( self : Dict ) -> List[str]: '''simple docstring''' def create_cache_and_write_probe(UpperCamelCase_ : Dict ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. 
os.makedirs(self._cache_dir , exist_ok=_lowerCAmelCase ) _lowercase : Optional[int] = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_lowerCAmelCase , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowercase : Any = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_lowerCAmelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def __lowercase ( self : Tuple ) -> Any: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def __lowercase ( self : Tuple , UpperCamelCase_ : Dict ) -> Optional[Any]: '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __lowercase ( self : str , UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' import pyspark def get_arrow_batch_size(UpperCamelCase_ : Union[str, Any] ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) _lowercase : List[Any] = self.df.count() _lowercase : Any = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowercase : int = ( self.df.limit(_lowerCAmelCase ) .repartition(1 ) .mapInArrow(_lowerCAmelCase , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowercase : List[str] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowercase : Union[str, Any] = min(_lowerCAmelCase , int(approx_total_size / max_shard_size ) ) _lowercase : Union[str, Any] = self.df.repartition(_lowerCAmelCase ) def __lowercase ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , ) -> List[str]: '''simple docstring''' import pyspark _lowercase : Tuple = ParquetWriter if file_format == '''parquet''' else ArrowWriter _lowercase : Tuple = os.path.join(self._working_dir , os.path.basename(_lowerCAmelCase ) ) if self._working_dir else fpath _lowercase : List[str] = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowercase : Tuple = self.config.features _lowercase : Union[str, Any] = self._writer_batch_size _lowercase : int = self._fs.storage_options def write_arrow(UpperCamelCase_ : Tuple ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowercase : Any = pyspark.TaskContext().taskAttemptId() _lowercase : List[Any] = next(_lowerCAmelCase , _lowerCAmelCase ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) _lowercase : List[str] = 0 _lowercase : List[str] = writer_class( features=_lowerCAmelCase , path=working_fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , writer_batch_size=_lowerCAmelCase , storage_options=_lowerCAmelCase , embed_local_files=_lowerCAmelCase , ) _lowercase : Tuple = pa.Table.from_batches([first_batch] ) writer.write_table(_lowerCAmelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowercase , _lowercase : Optional[int] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 _lowercase : Optional[int] = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , writer_batch_size=_lowerCAmelCase , storage_options=_lowerCAmelCase , embed_local_files=_lowerCAmelCase , ) _lowercase : List[str] = pa.Table.from_batches([batch] ) writer.write_table(_lowerCAmelCase ) if writer._num_bytes > 0: _lowercase , _lowercase : int = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_lowerCAmelCase ) ): _lowercase : str = os.path.join(os.path.dirname(_lowerCAmelCase ) , os.path.basename(_lowerCAmelCase ) ) shutil.move(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : int = ( self.df.mapInArrow(_lowerCAmelCase , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __lowercase ( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] = "arrow" , UpperCamelCase_ : str = None , UpperCamelCase_ : Union[str, Any] = None , **UpperCamelCase_ : List[str] , ) -> str: '''simple docstring''' self._validate_cache_dir() _lowercase : Any = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_lowerCAmelCase ) _lowercase : Tuple = not is_remote_filesystem(self._fs ) _lowercase : Optional[Any] = os.path.join if is_local else posixpath.join _lowercase : Union[str, Any] = '''-TTTTT-SSSSS-of-NNNNN''' _lowercase : Optional[int] = F"{self.name}-{split_generator.name}{SUFFIX}.{file_format}" _lowercase : Optional[int] = path_join(self._output_dir , _lowerCAmelCase ) _lowercase : Optional[Any] = 0 _lowercase : Tuple = 0 _lowercase : Optional[int] = 0 _lowercase : Any = [] _lowercase : Dict = [] for task_id, content in self._prepare_split_single(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : List[str] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) 
all_shard_lengths.extend(_lowerCAmelCase ) _lowercase : Optional[int] = total_num_examples _lowercase : Any = total_num_bytes # should rename everything at the end logger.debug(F"Renaming {total_shards} shards." ) if total_shards > 1: _lowercase : Any = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowercase : Union[str, Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , ): rename( _lowerCAmelCase , fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , fpath.replace('''TTTTT-SSSSS''' , F"{global_shard_id:05d}" ).replace('''NNNNN''' , F"{total_shards:05d}" ) , ) _lowercase : str = [] _lowercase : str = 0 for i in range(len(_lowerCAmelCase ) ): _lowercase , _lowercase : List[str] = task_id_and_num_shards[i] for shard_id in range(_lowerCAmelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_lowerCAmelCase , len(_lowerCAmelCase ) ).map(lambda UpperCamelCase_ : _rename_shard(*_lowerCAmelCase ) ).collect() else: # don't use any pattern _lowercase : Dict = 0 _lowercase : Dict = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , fpath.replace(_lowerCAmelCase , '''''' ) , ) def __lowercase ( self : Any , UpperCamelCase_ : Optional[Any] , ) -> Optional[Any]: '''simple docstring''' return SparkExamplesIterable(self.df )
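# A sketch of the entry point this builder backs: datasets exposes it as
# Dataset.from_spark (available in recent datasets releases). Assumes a running
# SparkSession; on multi-node clusters a shared cache_dir is required, as
# enforced by the cache-probe validation above.
from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
ds = Dataset.from_spark(df)  # runs the Spark builder above, returns an Arrow-backed Dataset
print(ds)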
709
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
    @classmethod
    def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])
411
0
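To make the shard-renaming convention in the Spark builder above concrete, here is a minimal, self-contained sketch; the path template, ids and total shard count below are illustrative assumptions, not values taken from the original code.

# Sketch of the -TTTTT-SSSSS-of-NNNNN naming used when renaming Spark output shards.
# All concrete values here are made up for illustration.
fpath = "out/data-TTTTT-SSSSS-of-NNNNN.arrow"  # assumed template
total_shards = 12  # assumed global shard count

def shard_written_by_task(task_id: int, shard_id: int) -> str:
    # Name as written by one Spark task before the final rename.
    return fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")

def final_shard_name(global_shard_id: int) -> str:
    # Final name: a single global numbering over all tasks.
    return fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")

print(shard_written_by_task(3, 1))  # out/data-00003-00001-of-NNNNN.arrow
print(final_shard_name(7))          # out/data-00007-of-00012.arrow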
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find the root of a function using the Newton-Raphson method, with a
    multiplicity factor to keep fast convergence at repeated roots."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
27
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
27
1
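In math terms, the update rule implemented by newton_raphson above, where m is the multiplicity passed for repeated roots, is:

x_{n+1} = x_n - m \, \frac{f(x_n)}{f'(x_n)}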
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal value to its binary equivalent, returned as an
    int made of 0/1 digits; negative values keep their sign."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
701
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_maskformer_config ( model_name ): '''simple docstring''' backbone_config = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) config = MaskFormerConfig(backbone_config=backbone_config ) repo_id = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok config.num_labels = 847 filename = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok config.num_labels = 150 filename = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok config.num_labels = 171 filename = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO config.num_labels = 133 filename = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok config.num_labels = 19 filename = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok config.num_labels = 65 filename = """mapillary-vistas-id2label.json""" idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) ) idalabel = {int(k ): v for k, v in idalabel.items()} return config def create_rename_keys ( config ): '''simple docstring''' rename_keys = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight',
F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) 
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') ) rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def rename_key ( dct , old , new ): '''simple docstring''' val = dct.pop(old ) dct[new] = val def read_in_swin_q_k_v ( state_dict , backbone_config ): '''simple docstring''' num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): dim = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :] state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim] state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[ dim : dim * 2, : ] state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[ dim : dim * 2 ] state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[ -dim :, : ] state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :] # fmt: on def read_in_decoder_q_k_v ( state_dict , config ): '''simple docstring''' # fmt: off hidden_size = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :] state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:config.hidden_size] state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :] state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2] state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :] state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :] state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:config.hidden_size] state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :] state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2] state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :] state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :] # fmt: on def prepare_img (): '''simple docstring''' url = """http://images.cocodataset.org/val2017/000000039769.jpg""" im = Image.open(requests.get(url , stream=True ).raw ) return im @torch.no_grad() def convert_maskformer_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ): '''simple docstring''' config = get_maskformer_config(model_name ) # load original state_dict with open(checkpoint_path , """rb""" ) as f: data = pickle.load(f ) state_dict = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys rename_keys = create_rename_keys(config ) for src, dest in rename_keys: rename_key(state_dict , src , dest ) read_in_swin_q_k_v(state_dict , config.backbone_config ) read_in_decoder_q_k_v(state_dict , config ) # update to torch tensors for key, value in state_dict.items(): state_dict[key] = torch.from_numpy(value ) # load 🤗 model model = MaskFormerForInstanceSegmentation(config ) model.eval() for name, param in model.named_parameters(): print(name , param.shape ) missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(unexpected_keys ) == 0, F'Unexpected keys: {unexpected_keys}' # verify results image = prepare_img() if "vistas" in model_name: ignore_index = 65 elif "cityscapes" in model_name: ignore_index = 65_535 else: ignore_index = 255 reduce_labels = True if """ade""" in model_name else False image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels ) inputs = image_processor(image , return_tensors="""pt""" ) outputs = model(**inputs ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": expected_logits = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(pytorch_dump_folder_path ).mkdir(exist_ok=True ) model.save_pretrained(pytorch_dump_folder_path ) image_processor.save_pretrained(pytorch_dump_folder_path ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F'nielsr/{model_name}' ) image_processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help="""Name of the MaskFormer model you'd like to convert""", ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) args = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
53
0
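As a quick, illustrative sanity check of the hex_to_bin conversion above (the input values are arbitrary):

# 0x1A = 26 = 0b11010; the sign is preserved for negative inputs.
assert hex_to_bin("1A") == 11010
assert hex_to_bin("-f") == -1111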
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
654
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int,
    remainder: int,
    digits: list[int],
    length: int,
) -> int:
    """Count the reversible numbers of the given length, placing digits pairwise."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(F'''{solution() = }''')
654
1
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        '''simple docstring'''
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        '''simple docstring'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        '''simple docstring'''
        _ = self.to_json_kwargs.pop("""path_or_buf""", None)
        orient = self.to_json_kwargs.pop("""orient""", """records""")
        lines = self.to_json_kwargs.pop("""lines""", True if orient == """records""" else False)
        index = self.to_json_kwargs.pop("""index""", False if orient in ["""split""", """table"""] else True)
        compression = self.to_json_kwargs.pop("""compression""", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, """wb""", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    """ was passed. Please provide a local path instead."""
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        '''simple docstring'''
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        '''simple docstring'''
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="""ba""",
                disable=not logging.is_progress_bar_enabled(),
                desc="""Creating json from Arrow format""",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="""ba""",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="""Creating json from Arrow format""",
                ):
                    written += file_obj.write(json_str)

        return written
13
'''simple docstring'''
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    '''simple docstring'''
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
13
1
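For the Prim implementation above, a small illustrative run; the graph below is an assumption chosen for the example (adjacency_list[u] holds [v, weight] pairs of an undirected graph):

# Triangle graph with edge weights 0-1: 1, 1-2: 1, 0-2: 3.
adjacency_list = {
    0: [[1, 1], [2, 3]],
    1: [[0, 1], [2, 1]],
    2: [[0, 3], [1, 1]],
}
# Expected minimum spanning tree edges as (parent, child) pairs: [(0, 1), (1, 2)]
print(prisms_algorithm(adjacency_list))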
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
306
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCamelCase_ ( self : int ): '''simple docstring''' lowerCamelCase__: Optional[Any] = 1 lowerCamelCase__: Union[str, Any] = 3 lowerCamelCase__: str = (32, 32) lowerCamelCase__: str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__: Dict = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__: Dict = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def lowerCamelCase_ ( self : int ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__: str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) return CLIPTextModel(__a ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowerCamelCase__: str = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase__: List[str] = self.dummy_cond_unet_upscale lowerCamelCase__: Optional[Any] = DDPMScheduler() lowerCamelCase__: Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" ) lowerCamelCase__: Tuple = self.dummy_vae lowerCamelCase__: Optional[int] = self.dummy_text_encoder lowerCamelCase__: Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__: Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__: Optional[Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) lowerCamelCase__: Any = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase__: List[str] = """A painting of a squirrel eating a burger""" lowerCamelCase__: Dict = torch.Generator(device=__a ).manual_seed(0 ) 
lowerCamelCase__: Any = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) lowerCamelCase__: List[str] = output.images lowerCamelCase__: Union[str, Any] = torch.Generator(device=__a ).manual_seed(0 ) lowerCamelCase__: List[str] = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__a , )[0] lowerCamelCase__: Tuple = image[0, -3:, -3:, -1] lowerCamelCase__: int = image_from_tuple[0, -3:, -3:, -1] lowerCamelCase__: int = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) lowerCamelCase__: List[str] = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowerCamelCase__: Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase__: List[str] = self.dummy_cond_unet_upscale lowerCamelCase__: Optional[int] = DDPMScheduler() lowerCamelCase__: Any = DDIMScheduler(prediction_type="""v_prediction""" ) lowerCamelCase__: List[str] = self.dummy_vae lowerCamelCase__: Optional[Any] = self.dummy_text_encoder lowerCamelCase__: Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__: str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__: List[str] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk lowerCamelCase__: Tuple = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) lowerCamelCase__: List[str] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase__: Any = """A painting of a squirrel eating a burger""" lowerCamelCase__: str = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) lowerCamelCase__: Any = output.images assert image.shape[0] == 2 lowerCamelCase__: Optional[Any] = torch.Generator(device=__a ).manual_seed(0 ) lowerCamelCase__: Dict = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) lowerCamelCase__: Tuple = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__: int = self.dummy_cond_unet_upscale lowerCamelCase__: Dict = DDPMScheduler() lowerCamelCase__: Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" ) lowerCamelCase__: List[str] = self.dummy_vae lowerCamelCase__: Tuple = self.dummy_text_encoder lowerCamelCase__: int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__: Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__: Union[str, Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 lowerCamelCase__: Optional[int] = unet.half() lowerCamelCase__: Optional[Any] = text_encoder.half() # make sure here that pndm 
scheduler skips prk lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) lowerCamelCase__: List[Any] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase__: Tuple = """A painting of a squirrel eating a burger""" lowerCamelCase__: Optional[int] = torch.manual_seed(0 ) lowerCamelCase__: Optional[Any] = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="""np""" , ).images lowerCamelCase__: Optional[int] = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowerCamelCase__: Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) lowerCamelCase__: List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) lowerCamelCase__: Dict = """stabilityai/stable-diffusion-x4-upscaler""" lowerCamelCase__: Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() lowerCamelCase__: List[Any] = """a cat sitting on a park bench""" lowerCamelCase__: Dict = torch.manual_seed(0 ) lowerCamelCase__: Any = pipe( prompt=__a , image=__a , generator=__a , output_type="""np""" , ) lowerCamelCase__: Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-3 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowerCamelCase__: Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) lowerCamelCase__: int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) lowerCamelCase__: int = """stabilityai/stable-diffusion-x4-upscaler""" lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() lowerCamelCase__: Any = """a cat sitting on a park bench""" lowerCamelCase__: Tuple = torch.manual_seed(0 ) lowerCamelCase__: Optional[int] = pipe( prompt=__a , image=__a , generator=__a , output_type="""np""" , ) lowerCamelCase__: int = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase__: Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) lowerCamelCase__: Tuple = """stabilityai/stable-diffusion-x4-upscaler""" lowerCamelCase__: Optional[int] = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) 
pipe.enable_sequential_cpu_offload() lowerCamelCase__: str = """a cat sitting on a park bench""" lowerCamelCase__: int = torch.manual_seed(0 ) lowerCamelCase__: Optional[Any] = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="""np""" , ) lowerCamelCase__: Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
306
1
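A minimal sketch of how the (residue, atom37) -> atom14 index map built above is typically used to gather coordinates between layouts; the tensor shapes and toy values here are assumptions for illustration, not part of the original module:

import torch

num_res = 2
atom14_positions = torch.randn(num_res, 14, 3)  # assumed per-residue atom14 coordinates
residx_atom37_to_atom14 = torch.zeros(num_res, 37, dtype=torch.long)  # toy index map
# Gather along the atom axis to produce the sparser atom37 layout.
atom37_positions = torch.gather(
    atom14_positions, 1, residx_atom37_to_atom14[..., None].expand(-1, -1, 3)
)
print(atom37_positions.shape)  # torch.Size([2, 37, 3])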
"""simple docstring""" from collections.abc import Generator def lowercase_ ( ): """simple docstring""" A_ , A_ : List[str] = 0, 1 while True: A_ , A_ : Tuple = b, a + b yield b def lowercase_ ( _UpperCAmelCase = 1000 ): """simple docstring""" A_ : Optional[Any] = 1 A_ : List[Any] = fibonacci_generator() while len(str(next(_UpperCAmelCase ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
711
"""simple docstring""" import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def lowercase_ ( _UpperCAmelCase ): """simple docstring""" A_ : Optional[Any] = [ '''decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) def lowercase_ ( _UpperCAmelCase ): """simple docstring""" A_ , A_ : List[Any] = emb.weight.shape A_ : List[Any] = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) A_ : Any = emb.weight.data return lin_layer def lowercase_ ( _UpperCAmelCase ): """simple docstring""" A_ : int = torch.load(_UpperCAmelCase , map_location='''cpu''' ) A_ : Any = Namespace(**checkpoint['''cfg''']['''model'''] ) A_ : List[str] = checkpoint['''model'''] remove_ignore_keys_(_UpperCAmelCase ) A_ : Union[str, Any] = state_dict['''decoder.embed_tokens.weight'''].shape[0] A_ : List[str] = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()} A_ : int = XGLMConfig( vocab_size=_UpperCAmelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) A_ : Any = XGLMForCausalLM(_UpperCAmelCase ) A_ : int = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) print(_UpperCAmelCase ) A_ : int = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _lowerCamelCase : int = parser.parse_args() _lowerCamelCase : Optional[Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
361
0
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('float')),
                "references": datasets.Sequence(datasets.Value('float')),
            }
        else:
            return {
                "predictions": datasets.Value('float'),
                "references": datasets.Value('float'),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
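# A minimal sanity check of the metric above, run directly against scikit-learn
# (a sketch, assuming scikit-learn is installed; note `squared=False` is the
# pre-1.4 spelling -- newer releases also expose root_mean_squared_error).
from sklearn.metrics import mean_squared_error

references = [3, -0.5, 2, 7]
predictions = [2.5, 0.0, 2, 8]

mse = mean_squared_error(references, predictions)  # 0.375
rmse = mean_squared_error(references, predictions, squared=False)  # ~0.6124
print({"mse": mse, "rmse": rmse})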
import unittest

from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel


class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
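# Sketch of the invariant the past-key-values test above checks: decoding only the
# new tokens against a cache must match re-running the full sequence. This is an
# illustrative helper, not part of the test suite; `model` is any HF causal LM
# that supports use_cache (e.g. GPTNeoXJapaneseForCausalLM).
import torch


@torch.no_grad()
def check_cache_consistency(model, input_ids, next_tokens, atol=1e-3):
    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    attention_mask = torch.ones_like(full_ids)
    full_logits = model(full_ids, attention_mask=attention_mask).logits
    past = model(input_ids, use_cache=True).past_key_values
    incremental_logits = model(next_tokens, attention_mask=attention_mask, past_key_values=past).logits
    return torch.allclose(full_logits[:, -next_tokens.shape[1] :], incremental_logits, atol=atol)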
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs  # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
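# Sketch of the confidence signal behind the early-exit logic above: each highway
# head's prediction entropy is compared to a threshold, and inference can stop at
# the first sufficiently confident exit. The entropy helper here mirrors the one
# imported from modeling_highway_bert; the threshold value is illustrative only.
import torch


def softmax_entropy(logits):
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)


def should_exit(highway_logits, threshold=0.1):
    # Low entropy means a peaked (confident) distribution, so the model can exit early.
    return softmax_entropy(highway_logits).mean().item() < threshold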
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_output_hidden_state(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
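# Hedged usage sketch for the checkpoint exercised above: run inference and merge
# the per-query masks/classes with the image processor's post-processing. Assumes
# Hub access and the same local fixture image as the tests.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# target_sizes expects (height, width); PIL's size attribute is (width, height).
result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(result["segmentation"].shape, len(result["segments_info"]))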
"""simple docstring""" import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class A_ ( _a ): lowerCAmelCase__ = (DDIMParallelScheduler,) lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0)) def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Optional[int] = { "num_train_timesteps": 1_000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**__lowerCAmelCase ) return config def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase ) _lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0 _lowerCamelCase : List[Any] = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for t in scheduler.timesteps: _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample return sample def _lowercase ( self: List[str] ): '''simple docstring''' for timesteps in [100, 500, 1_000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 ) _lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) ) def _lowercase ( self: Any ): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase ) def _lowercase ( self: List[str] ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__lowerCAmelCase ) def _lowercase ( self: Optional[int] ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' self.check_over_configs(thresholding=__lowerCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' for t in [1, 10, 49]: self.check_over_forward(time_step=__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ): 
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ): self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5 def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : Union[str, Any] = self.get_scheduler_config() _lowerCamelCase : str = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0 scheduler.set_timesteps(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = self.dummy_model() _lowerCamelCase : Optional[int] = self.dummy_sample_deter _lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1 _lowerCamelCase : Dict = self.dummy_sample_deter - 0.1 _lowerCamelCase : Union[str, Any] = samplea.shape[0] _lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 ) _lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase ) _lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) ) _lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase ) _lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) ) _lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2 assert abs(result_mean.item() - 0.49_82 ) < 1e-3 def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Any = self.full_loop() _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) ) _lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3 def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" ) _lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) ) _lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 52.53_02 ) < 1e-2 assert abs(result_mean.item() - 0.06_84 ) < 1e-3 def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 ) _lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) ) _lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2 assert abs(result_mean.item() - 0.19_51 ) < 1e-3 def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 ) _lowerCamelCase : int = 
torch.sum(torch.abs(__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2 assert abs(result_mean.item() - 0.19_41 ) < 1e-3
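# Minimal sketch of the denoising loop that `full_loop` above wraps: configure the
# scheduler, pick the inference timesteps, then repeatedly map a noise prediction
# back to the previous sample. The zero residual stands in for a real UNet call.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample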
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
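# Hedged usage sketch: the processor routes images to the image processor and text
# to the tokenizer, merging both into a single batch. The checkpoint name and
# image URL are assumptions for illustration.
import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)

inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # pixel_values plus the tokenizer outputs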
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__) lowerCAmelCase__ : Optional[Any] = ['model.decoder.embed_positions.weights'] def a_ ( lowerCamelCase ): if "emb" in name: UpperCAmelCase__ = name.replace('emb' , 'model.decoder.embed_tokens' ) if "transformer" in name: UpperCAmelCase__ = name.replace('transformer' , 'model.decoder' ) if "cross_attention" in name: UpperCAmelCase__ = name.replace('cross_attention' , 'encoder_attn' ) if "linear1" in name: UpperCAmelCase__ = name.replace('linear1' , 'fc1' ) if "linear2" in name: UpperCAmelCase__ = name.replace('linear2' , 'fc2' ) if "norm1" in name: UpperCAmelCase__ = name.replace('norm1' , 'self_attn_layer_norm' ) if "norm_cross" in name: UpperCAmelCase__ = name.replace('norm_cross' , 'encoder_attn_layer_norm' ) if "norm2" in name: UpperCAmelCase__ = name.replace('norm2' , 'final_layer_norm' ) if "out_norm" in name: UpperCAmelCase__ = name.replace('out_norm' , 'model.decoder.layer_norm' ) if "linears" in name: UpperCAmelCase__ = name.replace('linears' , 'lm_heads' ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase__ = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' ) return name def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = list(state_dict.keys() ) UpperCAmelCase__ = {} for key in keys: UpperCAmelCase__ = state_dict.pop(lowerCamelCase ) UpperCAmelCase__ = rename_keys(lowerCamelCase ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase__ = val[:hidden_size, :] UpperCAmelCase__ = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase__ = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase__ = val else: UpperCAmelCase__ = val return state_dict, enc_dec_proj_state_dict def a_ ( lowerCamelCase ): if checkpoint == "small": # default config values UpperCAmelCase__ = 1_0_2_4 UpperCAmelCase__ = 2_4 UpperCAmelCase__ = 1_6 elif checkpoint == "medium": UpperCAmelCase__ = 1_5_3_6 UpperCAmelCase__ = 4_8 UpperCAmelCase__ = 2_4 elif checkpoint == "large": UpperCAmelCase__ = 2_0_4_8 UpperCAmelCase__ = 4_8 UpperCAmelCase__ = 3_2 else: raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) UpperCAmelCase__ = MusicgenDecoderConfig( hidden_size=lowerCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCamelCase , num_attention_heads=lowerCamelCase , ) return config @torch.no_grad() def a_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="cpu" ): UpperCAmelCase__ = MusicGen.get_pretrained(lowerCamelCase , device=lowerCamelCase ) UpperCAmelCase__ = decoder_config_from_checkpoint(lowerCamelCase ) UpperCAmelCase__ = fairseq_model.lm.state_dict() UpperCAmelCase__ , UpperCAmelCase__ = rename_state_dict( lowerCamelCase , hidden_size=decoder_config.hidden_size ) UpperCAmelCase__ = TaEncoderModel.from_pretrained('t5-base' ) UpperCAmelCase__ = EncodecModel.from_pretrained('facebook/encodec_32khz' ) UpperCAmelCase__ = MusicgenForCausalLM(lowerCamelCase ).eval() # load all decoder 
weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase__ , UpperCAmelCase__ = decoder.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) for key in missing_keys.copy(): if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowerCamelCase ) if len(lowerCamelCase ) > 0: raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' ) if len(lowerCamelCase ) > 0: raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model UpperCAmelCase__ = MusicgenForConditionalGeneration(text_encoder=lowerCamelCase , audio_encoder=lowerCamelCase , decoder=lowerCamelCase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowerCamelCase ) # check we can do a forward pass UpperCAmelCase__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase__ = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase__ = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase ).logits if logits.shape != (8, 1, 2_0_4_8): raise ValueError('Incorrect shape for logits' ) # now construct the processor UpperCAmelCase__ = AutoTokenizer.from_pretrained('t5-base' ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' ) UpperCAmelCase__ = MusicgenProcessor(feature_extractor=lowerCamelCase , tokenizer=lowerCamelCase ) # set the appropriate bos/pad token ids UpperCAmelCase__ = 2_0_4_8 UpperCAmelCase__ = 2_0_4_8 # set other default generation config params UpperCAmelCase__ = int(3_0 * audio_encoder.config.frame_rate ) UpperCAmelCase__ = True UpperCAmelCase__ = 3.0 if pytorch_dump_folder is not None: Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if repo_id: logger.info(f'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(lowerCamelCase ) processor.push_to_hub(lowerCamelCase ) if __name__ == "__main__": lowerCAmelCase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint', default='small', type=str, help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.', ) parser.add_argument( '--pytorch_dump_folder', required=True, default=None, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) parser.add_argument( '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.' ) lowerCAmelCase__ : List[str] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
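# Hedged sketch of exercising a converted checkpoint; it mirrors the standard
# MusicGen generation flow, with "./musicgen-small-converted" standing in for
# whatever --pytorch_dump_folder was used above (a hypothetical path).
from transformers import AutoProcessor, MusicgenForConditionalGeneration

processor = AutoProcessor.from_pretrained("./musicgen-small-converted")
model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-small-converted")

inputs = processor(text=["80s pop track with bassy drums and synth"], padding=True, return_tensors="pt")
audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
print(audio_values.shape)  # (batch, num_channels, num_samples)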
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor _UpperCamelCase : List[str] = logging.getLogger(__name__) _UpperCamelCase : Tuple = 50 # max width of layer names _UpperCamelCase : int = 70 # max width of quantizer names def __snake_case ( lowerCAmelCase : str ): __UpperCAmelCase = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=lowerCAmelCase , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=lowerCAmelCase , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=lowerCAmelCase , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=lowerCAmelCase , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=lowerCAmelCase , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=lowerCAmelCase , type=lowerCAmelCase , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=lowerCAmelCase , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def __snake_case ( lowerCAmelCase : Union[str, Any] ): if args.calibrator == "max": __UpperCAmelCase = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) __UpperCAmelCase = 'histogram' elif args.calibrator == "mse": __UpperCAmelCase = 'histogram' else: raise ValueError(F"""Invalid calibrator {args.calibrator}""" ) __UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase ) __UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase ) def __snake_case ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any=False , lowerCAmelCase : Dict=False ): logger.info('Configuring Model for Quantization' ) logger.info(F"""using quantization package {pytorch_quantization.__file__}""" ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCAmelCase , ['embeddings'] , which='weight' , _disabled=lowerCAmelCase ) if args.quant_disable: set_quantizer_by_name(lowerCAmelCase , [''] , _disabled=lowerCAmelCase ) if args.quant_disable_keyword: set_quantizer_by_name(lowerCAmelCase , args.quant_disable_keyword , _disabled=lowerCAmelCase ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCAmelCase , [r'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=lowerCAmelCase ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCAmelCase , [r'layer.\d+.' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase ) if args.recalibrate_weights: recalibrate_weights(lowerCAmelCase ) if args.fuse_qkv: fuse_qkv(lowerCAmelCase , lowerCAmelCase ) if args.clip_gelu: clip_gelu(lowerCAmelCase , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCAmelCase ) def __snake_case ( lowerCAmelCase : Union[str, Any] ): logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F"""{name:80}: {module}""" ) def __snake_case ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple ): logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCAmelCase ) def __snake_case ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Any ): def fusea(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : int ): for mod in [qq, qk, qv]: if not hasattr(lowerCAmelCase , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return __UpperCAmelCase = qq._amax.detach().item() __UpperCAmelCase = qk._amax.detach().item() __UpperCAmelCase = qv._amax.detach().item() __UpperCAmelCase = max(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) qq._amax.fill_(lowerCAmelCase ) qk._amax.fill_(lowerCAmelCase ) qv._amax.fill_(lowerCAmelCase ) logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F"""FUSE_QKV: {name:{name_width}}""" ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def __snake_case ( lowerCAmelCase : Any , lowerCAmelCase : Dict ): for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): __UpperCAmelCase = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase ) __UpperCAmelCase = mod._input_quantizer._amax.data.detach().item() logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" ) def __snake_case ( lowerCAmelCase : List[Any] ): for name, mod in model.named_modules(): if hasattr(lowerCAmelCase , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: __UpperCAmelCase = mod.weight.shape[0] __UpperCAmelCase = mod._weight_quantizer._amax.detach() __UpperCAmelCase = torch.ones(lowerCAmelCase , dtype=amax.dtype , device=amax.device ) * amax print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" ) def __snake_case ( lowerCAmelCase : Optional[Any] ): for name, mod in model.named_modules(): if hasattr(lowerCAmelCase , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) __UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) __UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set __UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase , keepdims=lowerCAmelCase ).detach() logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" ) __UpperCAmelCase = amax def __snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any]=25 , lowerCAmelCase : Union[str, Any]=180 , lowerCAmelCase : List[Any]=None ): if ignore is None: __UpperCAmelCase = [] elif not isinstance(lowerCAmelCase , lowerCAmelCase ): __UpperCAmelCase = [ignore] __UpperCAmelCase = 0 for name, mod in model.named_modules(): if not hasattr(lowerCAmelCase , 'weight' ): continue __UpperCAmelCase = max(lowerCAmelCase , len(lowerCAmelCase ) ) for name, mod in model.named_modules(): __UpperCAmelCase = getattr(lowerCAmelCase , '_input_quantizer' , lowerCAmelCase ) __UpperCAmelCase = getattr(lowerCAmelCase , '_weight_quantizer' , lowerCAmelCase ) if not hasattr(lowerCAmelCase , 'weight' ): continue if type(lowerCAmelCase ) in ignore: continue if [True for s in ignore if type(lowerCAmelCase ) is str and s in name]: continue __UpperCAmelCase = F"""Act:{input_q.extra_repr()}""" __UpperCAmelCase = F"""Wgt:{weight_q.extra_repr()}""" __UpperCAmelCase = F"""{name:{name_width}} {act_str} {wgt_str}""" if len(lowerCAmelCase ) <= line_width: logger.info(lowerCAmelCase ) else: logger.info(F"""{name:{name_width}} {act_str}""" ) logger.info(F"""{' ':{name_width}} {wgt_str}""" ) def __snake_case ( lowerCAmelCase : Union[str, Any] ): __UpperCAmelCase = 0 for name, mod in model.named_modules(): if isinstance(lowerCAmelCase , pytorch_quantization.nn.TensorQuantizer ): print(F"""{name:80} {mod}""" ) count += 1 print(F"""{count} TensorQuantizers found in model""" ) def __snake_case ( lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : List[Any] ): __UpperCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if quantizer_mod is not None: assert hasattr(lowerCAmelCase , lowerCAmelCase ) setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else: logger.warning(F"""{name} has no {quantizer}""" ) def __snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str]="both" , **lowerCAmelCase : Dict ): __UpperCAmelCase = F"""Warning: changing {which} quantizers of {name:{qname_width}}""" for k, v in kwargs.items(): s += F""" {k}={v}""" if which in ["input", "both"]: set_quantizer(lowerCAmelCase , lowerCAmelCase , '_input_quantizer' , lowerCAmelCase , lowerCAmelCase ) if which in ["weight", "both"]: set_quantizer(lowerCAmelCase , lowerCAmelCase , '_weight_quantizer' , lowerCAmelCase , lowerCAmelCase ) logger.info(lowerCAmelCase ) def __snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int] ): for name, mod in model.named_modules(): if hasattr(lowerCAmelCase , '_input_quantizer' ) or hasattr(lowerCAmelCase , '_weight_quantizer' ): for n in names: if re.search(lowerCAmelCase , lowerCAmelCase ): set_quantizers(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) elif name.endswith('_quantizer' ): for n in names: if re.search(lowerCAmelCase , lowerCAmelCase ): __UpperCAmelCase = F"""Warning: changing {name:{name_width}}""" for k, v in 
kwargs.items(): s += F""" {k}={v}""" setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) logger.info(lowerCAmelCase )
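For orientation, here is a minimal sketch of the calibrate-then-quantize flow the helpers above implement, using only pytorch_quantization calls that already appear in this file; the toy model, batch sizes, and the percentile value are assumptions for illustration, not the script's actual configuration.

import torch
import pytorch_quantization.nn as quant_nn
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor

# Default descriptors: histogram-calibrated activations, per-channel weights.
quant_nn.QuantLinear.set_default_quant_desc_input(QuantDescriptor(num_bits=8, calib_method="histogram"))
quant_nn.QuantLinear.set_default_quant_desc_weight(QuantDescriptor(num_bits=8, axis=(0,)))

model = torch.nn.Sequential(quant_nn.QuantLinear(16, 16))  # stand-in for a real network

# 1) Put every TensorQuantizer into calibration mode (quantization off, statistics on).
for name, module in model.named_modules():
    if name.endswith("_quantizer") and module._calibrator is not None:
        module.disable_quant()
        module.enable_calib()

# 2) Run representative data through the model to collect range statistics.
with torch.no_grad():
    for _ in range(8):
        model(torch.randn(4, 16))

# 3) Load the collected amax values and switch quantization back on.
for name, module in model.named_modules():
    if name.endswith("_quantizer") and module._calibrator is not None:
        if isinstance(module._calibrator, calib.MaxCalibrator):
            module.load_calib_amax()
        else:
            module.load_calib_amax("percentile", percentile=99.99)
        module.enable_quant()
        module.disable_calib()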
396
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor

logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
396
1
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCAmelCase__ =logging.get_logger(__name__) UpperCAmelCase__ ={ "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCamelCase__ ( _a ): a : Dict = """gptj""" a : Any = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : List[Any] , A_ : Tuple=5_0_4_0_0 , A_ : str=2_0_4_8 , A_ : str=4_0_9_6 , A_ : Dict=2_8 , A_ : Union[str, Any]=1_6 , A_ : List[str]=6_4 , A_ : Optional[int]=None , A_ : Optional[Any]="gelu_new" , A_ : Optional[int]=0.0 , A_ : str=0.0 , A_ : str=0.0 , A_ : Tuple=1e-5 , A_ : List[str]=0.02 , A_ : Any=True , A_ : int=5_0_2_5_6 , A_ : Optional[Any]=5_0_2_5_6 , A_ : Optional[int]=False , **A_ : List[Any] , ): '''simple docstring''' __lowercase = vocab_size __lowercase = n_positions __lowercase = n_embd __lowercase = n_layer __lowercase = n_head __lowercase = n_inner __lowercase = rotary_dim __lowercase = activation_function __lowercase = resid_pdrop __lowercase = embd_pdrop __lowercase = attn_pdrop __lowercase = layer_norm_epsilon __lowercase = initializer_range __lowercase = use_cache __lowercase = bos_token_id __lowercase = eos_token_id super().__init__( bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ ) class lowerCamelCase__ ( _a ): def __init__( self : Union[str, Any] , A_ : PretrainedConfig , A_ : str = "default" , A_ : List[PatchingSpec] = None , A_ : bool = False , ): '''simple docstring''' super().__init__(A_ , task=A_ , patching_specs=A_ , use_past=A_ ) if not getattr(self._config , """pad_token_id""" , A_ ): # TODO: how to do that better? 
__lowercase = 0 @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' __lowercase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(A_ , direction="""inputs""" ) __lowercase = {0: """batch""", 1: """past_sequence + sequence"""} else: __lowercase = {0: """batch""", 1: """sequence"""} return common_inputs @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return self._config.n_layer @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return self._config.n_head def SCREAMING_SNAKE_CASE_ ( self : int , A_ : PreTrainedTokenizer , A_ : int = -1 , A_ : int = -1 , A_ : bool = False , A_ : Optional[TensorType] = None , ): '''simple docstring''' __lowercase = super(A_ , self ).generate_dummy_inputs( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) # We need to order the input in the way they appears in the forward() __lowercase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __lowercase , __lowercase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __lowercase = seqlen + 2 __lowercase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowercase = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers ) ] __lowercase = common_inputs["""attention_mask"""] if self.use_past: __lowercase = ordered_inputs["""attention_mask"""].dtype __lowercase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) return ordered_inputs @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' return 1_3
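As a quick illustration of the past_key_values shape logic in the dummy-input generator above, each layer caches a (key, value) pair of shape (batch, n_head, past_length, hidden_size // n_head). A hedged sketch against the public GPTJConfig; the batch and past-length numbers are just examples:

from transformers import GPTJConfig

config = GPTJConfig()  # defaults: n_embd=4096, n_head=16, n_layer=28
batch, past_len = 2, 10
head_dim = config.n_embd // config.n_head
print((batch, config.n_head, past_len, head_dim))  # (2, 16, 10, 256)
print(config.n_layer)  # number of (key, value) pairs with this shape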
442
"""simple docstring""" from __future__ import annotations def lowerCAmelCase_ ( UpperCamelCase__ : list[float] ): """simple docstring""" if len(UpperCamelCase__ ) < 2: raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" ) if any(i <= 0 for i in nums ): raise ValueError("""All values must be greater than 0""" ) __lowercase = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
442
1
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """Length of the circular arc subtending `angle` degrees on a circle of the given radius."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
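Worked out, the `__main__` call above covers a quarter of the full circumference:

from math import pi
assert abs(arc_length(90, 10) - 5 * pi) < 1e-12  # 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708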
194
import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Union[str, Any] = logging.get_logger(__name__) a_ : Optional[Any] = { 'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json', } class __UpperCamelCase ( _lowercase ): """simple docstring""" _lowercase : int = '''align_text_model''' def __init__( self , SCREAMING_SNAKE_CASE=3_0_5_2_2 , SCREAMING_SNAKE_CASE=7_6_8 , SCREAMING_SNAKE_CASE=1_2 , SCREAMING_SNAKE_CASE=1_2 , SCREAMING_SNAKE_CASE=3_0_7_2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=5_1_2 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE ) a__ = vocab_size a__ = hidden_size a__ = num_hidden_layers a__ = num_attention_heads a__ = hidden_act a__ = intermediate_size a__ = hidden_dropout_prob a__ = attention_probs_dropout_prob a__ = max_position_embeddings a__ = type_vocab_size a__ = initializer_range a__ = layer_norm_eps a__ = position_embedding_type a__ = use_cache a__ = pad_token_id @classmethod def _UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) a__ , a__ = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": a__ = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class __UpperCamelCase ( _lowercase ): """simple docstring""" _lowercase : str = '''align_vision_model''' def __init__( self , SCREAMING_SNAKE_CASE = 3 , SCREAMING_SNAKE_CASE = 6_0_0 , SCREAMING_SNAKE_CASE = 2.0 , SCREAMING_SNAKE_CASE = 3.1 , SCREAMING_SNAKE_CASE = 8 , SCREAMING_SNAKE_CASE = [3, 3, 5, 3, 5, 5, 3] , SCREAMING_SNAKE_CASE = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , SCREAMING_SNAKE_CASE = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , SCREAMING_SNAKE_CASE = [] , SCREAMING_SNAKE_CASE = [1, 2, 2, 2, 1, 2, 1] , SCREAMING_SNAKE_CASE = [1, 2, 2, 3, 3, 4, 1] , SCREAMING_SNAKE_CASE = [1, 6, 6, 6, 6, 6, 6] , SCREAMING_SNAKE_CASE = 0.25 , SCREAMING_SNAKE_CASE = "swish" , SCREAMING_SNAKE_CASE = 2_5_6_0 , SCREAMING_SNAKE_CASE = "mean" , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = 0.0_01 , SCREAMING_SNAKE_CASE = 0.99 , SCREAMING_SNAKE_CASE = 0.2 , **SCREAMING_SNAKE_CASE , ) -> str: super().__init__(**SCREAMING_SNAKE_CASE ) a__ = num_channels a__ = image_size a__ = width_coefficient a__ = depth_coefficient a__ = depth_divisor a__ = kernel_sizes a__ = in_channels a__ = out_channels a__ = depthwise_padding a__ = strides a__ = num_block_repeats a__ = expand_ratios a__ = squeeze_expansion_ratio a__ = hidden_act a__ = hidden_dim a__ = pooling_type a__ = initializer_range a__ = batch_norm_eps a__ = batch_norm_momentum a__ = drop_connect_rate a__ = sum(SCREAMING_SNAKE_CASE ) * 4 @classmethod def _UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) a__ , a__ = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": a__ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class __UpperCamelCase ( _lowercase ): """simple docstring""" _lowercase : Dict = '''align''' _lowercase : Optional[Any] = True def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=6_4_0 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , **SCREAMING_SNAKE_CASE , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE ) if text_config is None: a__ = {} logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' ) if vision_config is None: a__ = {} logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' ) a__ = AlignTextConfig(**SCREAMING_SNAKE_CASE ) a__ = AlignVisionConfig(**SCREAMING_SNAKE_CASE ) a__ = projection_dim a__ = temperature_init_value a__ = initializer_range @classmethod def _UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self ) -> int: a__ = copy.deepcopy(self.__dict__ ) a__ = self.text_config.to_dict() a__ = self.vision_config.to_dict() a__ = self.__class__.model_type return output
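A hedged sketch of composing the combined config from its parts via the classmethod defined above, using the public transformers classes this file provides; the override values are illustrative only:

from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_cfg = AlignTextConfig(vocab_size=30522, hidden_size=768)
vision_cfg = AlignVisionConfig(image_size=600)
config = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=640)
print(config.text_config.hidden_size, config.vision_config.image_size)  # 768 600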
194
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A: Union[str, Any] = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: int = [ "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMAEForPreTraining", "ViTMAELayer", "ViTMAEModel", "ViTMAEPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: List[Any] = [ "TFViTMAEForPreTraining", "TFViTMAEModel", "TFViTMAEPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys A: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
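The module above is the standard transformers lazy-import layout: the heavy framework code is only imported when one of the exported names is first accessed. A minimal standard-library sketch of the same idea follows; this is not transformers' actual _LazyModule implementation, just an illustration of the pattern:

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._name_to_submodule = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        # Only called for attributes not found normally: import on demand.
        if attr not in self._name_to_submodule:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        return getattr(module, attr)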
706
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A: int = { "configuration_trajectory_transformer": [ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrajectoryTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: Union[str, Any] = [ "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TrajectoryTransformerModel", "TrajectoryTransformerPreTrainedModel", "load_tf_weights_in_trajectory_transformer", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
7
0
from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar _snake_case : Union[str, Any] = TypeVar("T") class a (Generic[T] ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase : list[T] , lowerCamelCase : Callable[[T, T], T] ) -> None: __snake_case : Any | T = None __snake_case : int = len(lowerCamelCase ) __snake_case : list[T] = [any_type for _ in range(self.N )] + arr __snake_case : Tuple = fnc self.build() def __snake_case ( self : Dict ) -> None: for p in range(self.N - 1 , 0 , -1 ): __snake_case : List[str] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def __snake_case ( self : Optional[Any] , lowerCamelCase : int , lowerCamelCase : T ) -> None: p += self.N __snake_case : str = v while p > 1: __snake_case : Dict = p // 2 __snake_case : Optional[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def __snake_case ( self : List[str] , lowerCamelCase : int , lowerCamelCase : int ) -> T | None: # noqa: E741 __snake_case , __snake_case : Optional[int] = l + self.N, r + self.N __snake_case : T | None = None while l <= r: if l % 2 == 1: __snake_case : Optional[Any] = self.st[l] if res is None else self.fn(lowerCamelCase , self.st[l] ) if r % 2 == 0: __snake_case : int = self.st[r] if res is None else self.fn(lowerCamelCase , self.st[r] ) __snake_case , __snake_case : List[str] = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce _snake_case : Union[str, Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] _snake_case : Dict = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } _snake_case : Optional[Any] = SegmentTree(test_array, min) _snake_case : Optional[Any] = SegmentTree(test_array, max) _snake_case : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b) def lowerCAmelCase_ ( ): for i in range(len(__lowerCamelCase ) ): for j in range(__lowerCamelCase , len(__lowerCamelCase ) ): __snake_case : Optional[int] = reduce(__lowerCamelCase , test_array[i : j + 1] ) __snake_case : List[Any] = reduce(__lowerCamelCase , test_array[i : j + 1] ) __snake_case : Union[str, Any] = reduce(lambda __lowerCamelCase , __lowerCamelCase : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(__lowerCamelCase , __lowerCamelCase ) assert max_range == max_segment_tree.query(__lowerCamelCase , __lowerCamelCase ) assert sum_range == sum_segment_tree.query(__lowerCamelCase , __lowerCamelCase ) test_all_segments() for index, value in test_updates.items(): _snake_case : List[Any] = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
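The class and method names above were mangled, so as a readable companion, here is a self-contained miniature of the same structure: an iterative segment tree over an array of size 2N with the leaves in the upper half, queried over inclusive ranges with an associative, commutative function such as min, max, or addition. All names here are my own, not the original file's:

from typing import Callable, Generic, List, TypeVar

T = TypeVar("T")


class MiniSegmentTree(Generic[T]):
    def __init__(self, arr: List[T], fn: Callable[[T, T], T]) -> None:
        self.n = len(arr)
        self.fn = fn
        self.st = [None] * self.n + list(arr)  # leaves live at indices n .. 2n-1
        for p in range(self.n - 1, 0, -1):  # build internal nodes bottom-up
            self.st[p] = fn(self.st[2 * p], self.st[2 * p + 1])

    def query(self, l: int, r: int) -> T:  # inclusive range [l, r]
        res = None
        l, r = l + self.n, r + self.n
        while l <= r:
            if l % 2 == 1:  # l is a right child: fold it in and step inward
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:  # r is a left child: fold it in and step inward
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2  # move both cursors to parent level
        return res


print(MiniSegmentTree([1, 3, 5, 7], min).query(1, 3))               # 3
print(MiniSegmentTree([1, 3, 5, 7], lambda a, b: a + b).query(0, 3))  # 16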
81
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity (moles / volume) to normality using the n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law, P = nRT / V, with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law, V = nRT / P, with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law, T = PV / (nR), with R = 0.0821 L·atm/(mol·K)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
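A worked call against the ideal-gas helper above: 3 mol at 300 K in 0.82 L gives 3 * 0.0821 * 300 / 0.82 ≈ 90.1, rounded to 90 atm.

print(moles_to_pressure(volume=0.82, moles=3, temperature=300))  # 90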
202
0
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class __magic_name__ ( _snake_case ): def __init__( self : Any , lowerCAmelCase__ : str = "▁" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[str, AddedToken] = "<unk>" , lowerCAmelCase__ : Union[str, AddedToken] = "</s>" , lowerCAmelCase__ : Union[str, AddedToken] = "<pad>" , ) -> Optional[Any]: UpperCAmelCase = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } UpperCAmelCase = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): UpperCAmelCase = token_dict["token"] UpperCAmelCase = Tokenizer(Unigram() ) UpperCAmelCase = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}" ) , " " ), normalizers.Lowercase(), ] ) UpperCAmelCase = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ), pre_tokenizers.Digits(individual_digits=lowerCAmelCase__ ), pre_tokenizers.Punctuation(), ] ) UpperCAmelCase = decoders.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) UpperCAmelCase = TemplateProcessing( single=f"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) UpperCAmelCase = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 8_0_0_0 , lowerCAmelCase__ : bool = True , ) -> List[str]: UpperCAmelCase = trainers.UnigramTrainer( vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase = [files] self._tokenizer.train(lowerCAmelCase__ , trainer=lowerCAmelCase__ ) self.add_unk_id() def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : Union[Iterator[str], Iterator[Iterator[str]]] , lowerCAmelCase__ : int = 8_0_0_0 , lowerCAmelCase__ : bool = True , ) -> List[Any]: UpperCAmelCase = trainers.UnigramTrainer( vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , ) self._tokenizer.train_from_iterator(lowerCAmelCase__ , trainer=lowerCAmelCase__ ) self.add_unk_id() def _UpperCamelCase ( self : List[Any] ) -> Dict: UpperCAmelCase = json.loads(self._tokenizer.to_str() ) UpperCAmelCase = self.special_tokens["unk"]["id"] UpperCAmelCase = Tokenizer.from_str(json.dumps(lowerCAmelCase__ ) )
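A hedged usage sketch of the trainer methods defined above. The class name (SentencePieceUnigramTokenizer in the upstream source) was mangled here, and the toy corpus and vocabulary size are made up:

corpus = ["the quick brown fox jumps over the lazy dog"] * 100

tok = SentencePieceUnigramTokenizer()  # name assumed; it was mangled above
tok.train_from_iterator(corpus, vocab_size=100, show_progress=False)
print(tok.encode("the quick fox").tokens)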
711
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def _lowerCAmelCase( __A ): UpperCAmelCase = fname.split(os.path.sep )[-1] return re.search(r"^(.*)_\d+\.jpg$" , __A ).groups()[0] class __magic_name__ ( _snake_case ): def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : int=None ) -> Optional[Any]: UpperCAmelCase = file_names UpperCAmelCase = image_transform UpperCAmelCase = label_to_id def __len__( self : Tuple ) -> List[str]: return len(self.file_names ) def __getitem__( self : Optional[int] , lowerCAmelCase__ : Tuple ) -> Dict: UpperCAmelCase = self.file_names[idx] UpperCAmelCase = PIL.Image.open(lowerCAmelCase__ ) UpperCAmelCase = raw_image.convert("RGB" ) if self.image_transform is not None: UpperCAmelCase = self.image_transform(lowerCAmelCase__ ) UpperCAmelCase = extract_label(lowerCAmelCase__ ) if self.label_to_id is not None: UpperCAmelCase = self.label_to_id[label] return {"image": image, "label": label} def _lowerCAmelCase( __A , __A ): # Initialize accelerator if args.with_tracking: UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir ) else: UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase = config["lr"] UpperCAmelCase = int(config["num_epochs"] ) UpperCAmelCase = int(config["seed"] ) UpperCAmelCase = int(config["batch_size"] ) UpperCAmelCase = config["image_size"] if not isinstance(__A , (list, tuple) ): UpperCAmelCase = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , "isdigit" ): if args.checkpointing_steps == "epoch": UpperCAmelCase = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): UpperCAmelCase = int(args.checkpointing_steps ) else: raise ValueError( F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." ) else: UpperCAmelCase = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: UpperCAmelCase = os.path.split(__A )[-1].split("." )[0] accelerator.init_trackers(__A , __A ) # Grab all the image filenames UpperCAmelCase = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )] # Build the label correspondences UpperCAmelCase = [extract_label(__A ) for fname in file_names] UpperCAmelCase = list(set(__A ) ) id_to_label.sort() UpperCAmelCase = {lbl: i for i, lbl in enumerate(__A )} # Set the seed before splitting the data. 
np.random.seed(__A ) torch.manual_seed(__A ) torch.cuda.manual_seed_all(__A ) # Split our filenames between train and validation UpperCAmelCase = np.random.permutation(len(__A ) ) UpperCAmelCase = int(0.8 * len(__A ) ) UpperCAmelCase = random_perm[:cut] UpperCAmelCase = random_perm[cut:] # For training we use a simple RandomResizedCrop UpperCAmelCase = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] ) UpperCAmelCase = PetsDataset( [file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A ) # For evaluation, we use a deterministic Resize UpperCAmelCase = Compose([Resize(__A ), ToTensor()] ) UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A ) # Instantiate dataloaders. UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 ) UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase = create_model("resnet50d" , pretrained=__A , num_classes=len(__A ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): UpperCAmelCase = False for param in model.get_classifier().parameters(): UpperCAmelCase = True # We normalize the batches of images to be a bit faster. UpperCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device ) UpperCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler UpperCAmelCase = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( __A , __A , __A , __A , __A ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase = 0 # We also need to keep track of the starting epoch so files are named properly UpperCAmelCase = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" ) accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` UpperCAmelCase = os.path.splitext(__A )[0] if "epoch" in training_difference: UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1 UpperCAmelCase = None else: UpperCAmelCase = int(training_difference.replace("step_" , "" ) ) UpperCAmelCase = resume_step // len(__A ) resume_step -= starting_epoch * len(__A ) # Now we train the model for epoch in range(__A , __A ): model.train() if args.with_tracking: UpperCAmelCase = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step UpperCAmelCase = accelerator.skip_first_batches(__A , __A ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader UpperCAmelCase = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} UpperCAmelCase = (batch["image"] - mean) / std UpperCAmelCase = model(__A ) UpperCAmelCase = torch.nn.functional.cross_entropy(__A , batch["label"] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(__A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(__A , __A ): UpperCAmelCase = F"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: UpperCAmelCase = os.path.join(args.output_dir , __A ) accelerator.save_state(__A ) model.eval() UpperCAmelCase = 0 UpperCAmelCase = 0 for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} UpperCAmelCase = (batch["image"] - mean) / std with torch.no_grad(): UpperCAmelCase = model(__A ) UpperCAmelCase = outputs.argmax(dim=-1 ) UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) ) UpperCAmelCase = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() UpperCAmelCase = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" ) if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(__A ), "epoch": epoch, } , step=__A , ) if checkpointing_steps == "epoch": UpperCAmelCase = F"epoch_{epoch}" if args.output_dir is not None: UpperCAmelCase = os.path.join(args.output_dir , __A ) accelerator.save_state(__A ) if args.with_tracking: accelerator.end_training() def _lowerCAmelCase( ): UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument("--data_dir" , required=__A , help="The data folder on disk." ) parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." ) parser.add_argument( "--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) parser.add_argument( "--checkpointing_steps" , type=__A , default=__A , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , ) parser.add_argument( "--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=__A , default=__A , help="If the training should continue from a checkpoint folder." , ) parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , ) parser.add_argument( "--project_dir" , type=__A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(__A , __A ) if __name__ == "__main__": main()
1
0
'''simple docstring'''


def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization: the product of (multiplicity + 1) over all prime factors."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2  # the remaining factor is a prime with multiplicity 1
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
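The factorization-based count agrees with brute force; for example 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors. This is Project Euler problem 12, whose search here stops at the first triangle number with more than 500 divisors (76576500).

assert count_divisors(28) == 6 == sum(1 for d in range(1, 29) if 28 % d == 0)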
26
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (position - 1) // 2 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Optional[int] ) -> None: _A = [] _A = {} _A = 0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Optional[int] ) -> str: return str(self.heap ) def snake_case_ ( self : str ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _A = self.elements self.elements += 1 self._bubble_up(__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _A , _A = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A = self.heap[0] self._bubble_down(__lowerCAmelCase ) return elem def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Update the weight of the given key _A = self.position_map[elem] _A = (elem, weight) if position > 0: _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A = self.position_map[elem] if curr_pos == 0: return None _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[curr_pos] _A , _A = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_up(__lowerCAmelCase ) return None def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A = self.position_map[elem] _A , _A = self.heap[curr_pos] _A = get_child_left_position(__lowerCAmelCase ) _A = get_child_right_position(__lowerCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _A , _A = self.heap[child_left_position] _A , _A = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) if child_left_position < self.elements: _A , _A = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) else: return None if child_right_position < self.elements: _A , _A = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) return None def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: # Swap the nodes at the given positions _A = self.heap[nodea_pos][0] _A = self.heap[nodea_pos][0] _A , _A = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) _A = nodea_pos _A = nodea_pos class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : str ) -> None: _A = {} _A = 0 def __repr__( self : str ) -> str: return str(self.connections ) def __len__( self : Dict ) -> int: return self.nodes def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A = {} self.nodes += 1 def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCAmelCase ) self.add_node(__lowerCAmelCase ) _A = weight _A = weight def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: _A = {node: maxsize for node in graph.connections} _A = {node: None for node in graph.connections} _A = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_snake_case , _snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _A = priority_queue.extract_min() _A = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node # running prim's algorithm while not priority_queue.is_empty(): _A = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node return dist, parent
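A hedged usage sketch of the driver function above. GraphUndirectedWeighted is the class name already visible in the type hints, but prims_algo and the graph's add_edge method were mangled above; both names are taken from the upstream source and should be treated as assumptions here:

graph = GraphUndirectedWeighted[str]()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("c", "d", 5)
graph.add_edge("a", "c", 15)

dist, parent = prims_algo(graph)
print(parent)  # each node's parent in the minimum spanning tree (the root maps to None)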
2
0
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a ( A__ , A__ ) -> Union[str, Any]: '''simple docstring''' assert isinstance(A__ , A__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def a ( A__ , A__ , A__ ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = tmp_path / '''cache''' SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE__ : List[Any] = TextDatasetReader(A__ , cache_dir=A__ , keep_in_memory=A__ ).read() _check_text_dataset(A__ , A__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def a ( A__ , A__ , A__ ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = tmp_path / '''cache''' SCREAMING_SNAKE_CASE__ : Any = {'''text''': '''string'''} SCREAMING_SNAKE_CASE__ : Optional[int] = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE__ : Tuple = ( Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE__ : List[str] = TextDatasetReader(A__ , features=A__ , cache_dir=A__ ).read() _check_text_dataset(A__ , A__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def a ( A__ , A__ , A__ ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = tmp_path / '''cache''' SCREAMING_SNAKE_CASE__ : Dict = {'''text''': '''string'''} SCREAMING_SNAKE_CASE__ : str = TextDatasetReader(A__ , cache_dir=A__ , split=A__ ).read() _check_text_dataset(A__ , A__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def a ( A__ , A__ , A__ ) -> List[str]: '''simple docstring''' if issubclass(A__ , A__ ): SCREAMING_SNAKE_CASE__ : Optional[int] = text_path elif issubclass(A__ , A__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = [text_path] SCREAMING_SNAKE_CASE__ : Optional[Any] = tmp_path / '''cache''' SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''text''': '''string'''} SCREAMING_SNAKE_CASE__ : List[Any] = TextDatasetReader(A__ , cache_dir=A__ ).read() _check_text_dataset(A__ , A__ ) def a ( A__ , A__ , A__=("train",) ) -> Dict: '''simple docstring''' assert isinstance(A__ , A__ ) for split in splits: SCREAMING_SNAKE_CASE__ : Any = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def a ( A__ , A__ , A__ ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = tmp_path / '''cache''' SCREAMING_SNAKE_CASE__ : Optional[int] = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE__ : Tuple = TextDatasetReader({'''train''': text_path} , cache_dir=A__ , keep_in_memory=A__ ).read() 
_check_text_datasetdict(A__ , A__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def a ( A__ , A__ , A__ ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" SCREAMING_SNAKE_CASE__ : List[Any] = {'''text''': '''string'''} SCREAMING_SNAKE_CASE__ : str = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE__ : str = ( Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE__ : Any = TextDatasetReader({'''train''': text_path} , features=A__ , cache_dir=A__ ).read() _check_text_datasetdict(A__ , A__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def a ( A__ , A__ , A__ ) -> Tuple: '''simple docstring''' if split: SCREAMING_SNAKE_CASE__ : Tuple = {split: text_path} else: SCREAMING_SNAKE_CASE__ : Tuple = '''train''' SCREAMING_SNAKE_CASE__ : List[str] = {'''train''': text_path, '''test''': text_path} SCREAMING_SNAKE_CASE__ : Any = tmp_path / '''cache''' SCREAMING_SNAKE_CASE__ : Optional[int] = {'''text''': '''string'''} SCREAMING_SNAKE_CASE__ : Dict = TextDatasetReader(A__ , cache_dir=A__ ).read() _check_text_datasetdict(A__ , A__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
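These tests drive datasets' internal TextDatasetReader directly; the stable public route to the same behavior is the "text" builder through load_dataset. A hedged sketch with a placeholder file path:

from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_corpus.txt"})["train"]
print(ds.column_names)  # ['text']: one string column, one row per input line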
250
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() a_ :Tuple = logging.get_logger(__name__) a_ :List[str] = 'Hello world! cécé herlolip' def a ( A__ , A__ , A__ ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(A__ ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE__ : Optional[Any] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , A__ ) SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE__ : List[str] = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Any = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads['''mnli'''].dense.weight SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads['''mnli'''].dense.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.classification_heads['''mnli'''].out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE__ : List[str] = model(A__ )[0] if classification_head: SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.classification_heads['''mnli'''](roberta.extract_features(A__ ) ) else: SCREAMING_SNAKE_CASE__ : Dict = roberta.model(A__ )[0] print(our_output.shape , their_output.shape ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.allclose(A__ , A__ , atol=1e-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) if __name__ == "__main__": a_ :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) a_ :str = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
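Given the argparse block above, a typical invocation of this conversion script looks like the following; the script filename matches the upstream transformers repository, and the checkpoint and output paths are placeholders:

# python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#     --pytorch_dump_folder_path ./xlm-roberta-xl \
#     --classification_head   # pass only when converting a model with an MNLI head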
250
1