| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| code_codestyle: 66 |
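For orientation, this is how the wrapper above is typically driven once a session exists (the file name and input name here are illustrative, not taken from the source):

import numpy as np

session = OnnxRuntimeModel.load_model("model.onnx")  # defaults to CPUExecutionProvider
wrapper = OnnxRuntimeModel(model=session)
# keyword arguments become named ONNX graph inputs
outputs = wrapper(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))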
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCAmelCase_ ( lowercase_ : Callable , lowercase_ : float , lowercase_ : float , lowercase_ : float , lowercase_ : float ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = int(np.ceil((x_end - xa) / step_size ) )
__SCREAMING_SNAKE_CASE : Dict = np.zeros((n + 1,) )
__SCREAMING_SNAKE_CASE : List[Any] = ya
__SCREAMING_SNAKE_CASE : Dict = xa
for k in range(lowercase_ ):
__SCREAMING_SNAKE_CASE : str = y[k] + step_size * ode_func(lowercase_ , y[k] )
__SCREAMING_SNAKE_CASE : int = y[k] + (
(step_size / 2) * (ode_func(lowercase_ , y[k] ) + ode_func(x + step_size , lowercase_ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| style_context_codestyle: 674 | label: 0 |
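A quick usage sketch of the solver above on dy/dx = y, whose exact solution is e^x:

ys = heun_method(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.1, x_end=1.0)
print(ys[-1])  # ~2.714, close to e ≈ 2.71828; halving step_size tightens this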
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]="attention" ):
UpperCAmelCase : List[Any] = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
UpperCAmelCase : Dict = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
UpperCAmelCase : Optional[int] = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
UpperCAmelCase : int = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Optional[int]=False ):
if split_mlp_wi:
UpperCAmelCase : str = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
UpperCAmelCase : List[str] = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
UpperCAmelCase : List[str] = (wi_a, wi_a)
else:
UpperCAmelCase : Tuple = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
UpperCAmelCase : int = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Any ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def _snake_case ( UpperCamelCase : dict , *, UpperCamelCase : int , UpperCamelCase : bool ):
UpperCAmelCase : List[Any] = traverse_util.flatten_dict(variables["""target"""] )
UpperCAmelCase : Union[str, Any] = {"""/""".join(UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase : List[Any] = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , UpperCamelCase )
UpperCAmelCase : List[Any] = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase : str = old["""token_embedder/embedding"""]
# Encoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase , UpperCamelCase , """encoder""" , """pre_attention_layer_norm""" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = tax_attention_lookup(UpperCamelCase , UpperCamelCase , """encoder""" , """attention""" )
UpperCAmelCase : Tuple = layer_norm
UpperCAmelCase : List[str] = k.T
UpperCAmelCase : Optional[int] = o.T
UpperCAmelCase : int = q.T
UpperCAmelCase : Optional[int] = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase : int = tax_layer_norm_lookup(UpperCamelCase , UpperCamelCase , """encoder""" , """pre_mlp_layer_norm""" )
UpperCAmelCase , UpperCAmelCase : Any = tax_mlp_lookup(UpperCamelCase , UpperCamelCase , """encoder""" , UpperCamelCase )
UpperCAmelCase : Any = layer_norm
if split_mlp_wi:
UpperCAmelCase : Tuple = wi[0].T
UpperCAmelCase : Optional[int] = wi[1].T
else:
UpperCAmelCase : int = wi.T
UpperCAmelCase : Union[str, Any] = wo.T
UpperCAmelCase : int = old[
"""encoder/relpos_bias/rel_embedding"""
].T
UpperCAmelCase : List[str] = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase : str = tax_layer_norm_lookup(UpperCamelCase , UpperCamelCase , """decoder""" , """pre_self_attention_layer_norm""" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = tax_attention_lookup(UpperCamelCase , UpperCamelCase , """decoder""" , """self_attention""" )
UpperCAmelCase : int = layer_norm
UpperCAmelCase : List[str] = k.T
UpperCAmelCase : str = o.T
UpperCAmelCase : Optional[Any] = q.T
UpperCAmelCase : Optional[Any] = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase : Any = tax_layer_norm_lookup(UpperCamelCase , UpperCamelCase , """decoder""" , """pre_cross_attention_layer_norm""" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = tax_attention_lookup(UpperCamelCase , UpperCamelCase , """decoder""" , """encoder_decoder_attention""" )
UpperCAmelCase : List[str] = layer_norm
UpperCAmelCase : Optional[int] = k.T
UpperCAmelCase : List[Any] = o.T
UpperCAmelCase : Optional[int] = q.T
UpperCAmelCase : Tuple = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase : str = tax_layer_norm_lookup(UpperCamelCase , UpperCamelCase , """decoder""" , """pre_mlp_layer_norm""" )
UpperCAmelCase , UpperCAmelCase : str = tax_mlp_lookup(UpperCamelCase , UpperCamelCase , """decoder""" , UpperCamelCase )
UpperCAmelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
UpperCAmelCase : Union[str, Any] = wi[0].T
UpperCAmelCase : Union[str, Any] = wi[1].T
else:
UpperCAmelCase : Tuple = wi.T
UpperCAmelCase : List[Any] = wo.T
UpperCAmelCase : Union[str, Any] = old["""decoder/decoder_norm/scale"""]
UpperCAmelCase : Optional[int] = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase : Optional[Any] = old["""decoder/logits_dense/kernel"""].T
return new
def _snake_case ( UpperCamelCase : Union[str, Any] , UpperCamelCase : bool ):
UpperCAmelCase : Any = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase : Tuple = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase : Optional[Any] = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
UpperCAmelCase : Optional[int] = state_dict["""shared.weight"""]
return state_dict
def _snake_case ( UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Any ):
UpperCAmelCase : int = checkpoints.load_tax_checkpoint(UpperCamelCase )
UpperCAmelCase : Dict = convert_tax_to_pytorch(UpperCamelCase , num_layers=config.num_layers , is_encoder_only=UpperCamelCase )
UpperCAmelCase : Union[str, Any] = make_state_dict(UpperCamelCase , UpperCamelCase )
model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
def _snake_case ( UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : bool = False ):
UpperCAmelCase : str = TaConfig.from_json_file(UpperCamelCase )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCAmelCase : List[str] = TaEncoderModel(UpperCamelCase )
else:
UpperCAmelCase : str = TaForConditionalGeneration(UpperCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase )
print("""Done""" )
if __name__ == "__main__":
A: Dict = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
A: List[str] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| code_codestyle: 359 |
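The pervasive `.T` in the conversion comes from a layout mismatch: Flax `Dense` kernels are stored as (in_features, out_features), while `torch.nn.Linear` weights are (out_features, in_features). A minimal sketch of that convention (shapes are illustrative):

import numpy as np
import torch

kernel = np.random.randn(512, 2048).astype(np.float32)  # Flax layout: (in, out)
linear = torch.nn.Linear(512, 2048, bias=False)
linear.weight.data = torch.from_numpy(kernel.T.copy())  # torch layout: (out, in)

x = np.random.randn(1, 512).astype(np.float32)
assert np.allclose(x @ kernel, linear(torch.from_numpy(x)).detach().numpy(), atol=1e-4)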
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
UpperCAmelCase : List[str] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Tuple = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Any = """sgugger/tiny-distilbert-classification"""
UpperCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : List[str] = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any = """sshleifer/tiny-gpt2"""
UpperCAmelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , torchscript=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Dict = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , fpaa=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
# set architectures equal to `None`
UpperCAmelCase : str = None
UpperCAmelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
UpperCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Dict = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[int] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : str = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Union[str, Any] = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = """sshleifer/tinier_bart"""
UpperCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : str = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Any = """sshleifer/tiny-gpt2"""
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = """sshleifer/tinier_bart"""
UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : str = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """train_time.csv""" ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """env.csv""" ) , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Union[str, Any] = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """env.csv""" ) ).exists() )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """sequential""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """cumulative""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """current""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , """log.txt""" ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """log.txt""" ) ).exists() )
| style_context_codestyle: 359 | label: 1 |
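Outside the test harness, the same benchmark API can be driven directly; a minimal sketch (model id and sizes are illustrative):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"], training=False, inference=True,
    sequence_lengths=[8, 32], batch_sizes=[1], multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)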
def no_repeating_characters(input_str: str) -> bool:
    """Return True if every character in `input_str` is distinct, tracking seen
    code points in a single integer bitmap instead of a set."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for the current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| code_codestyle: 326 |
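For comparison, the same check with a set; the bitmap version trades hashing for one (potentially very large) integer:

def no_repeating_characters_set(input_str: str) -> bool:
    return len(set(input_str)) == len(input_str)

assert no_repeating_characters("abcdef") == no_repeating_characters_set("abcdef")
assert no_repeating_characters("aabb") == no_repeating_characters_set("aabb")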
"""
Evaluate a postfix (reverse Polish) expression, printing each stack operation
in tabular form.
"""
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| style_context_codestyle: 199 | label: 0 |
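A non-interactive usage sketch: "5 6 9 * +" is 5 + (6 * 9), and "2 3 ^" is 2 ** 3:

assert solve("5 6 9 * +".split(" ")) == 59
assert solve("2 3 ^".split(" ")) == 8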
"""Newton's forward-difference interpolation on equally spaced points."""
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Computes u * (u - 1) * ... * (u - p + 1) for the forward-difference terms."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()

| code_codestyle: 701 |
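The same formula in non-interactive form, for a quick check (data chosen for illustration: y = 2**x sampled at x = 0..3; `newton_forward` is a name introduced here, not from the source):

import math

def newton_forward(x: list[float], y0: list[float], value: float) -> float:
    n = len(x)
    table = [list(y0)]  # forward-difference table, one row per order
    for i in range(1, n):
        prev = table[-1]
        table.append([prev[j + 1] - prev[j] for j in range(n - i)])
    u = (value - x[0]) / (x[1] - x[0])
    total = table[0][0]
    u_term = 1.0
    for i in range(1, n):
        u_term *= u - (i - 1)  # same product that ucal() accumulates
        total += u_term * table[i][0] / math.factorial(i)
    return total

print(newton_forward([0, 1, 2, 3], [1, 2, 4, 8], 1.5))  # 2.8125, close to 2**1.5 ≈ 2.828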
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf

if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])

| style_context_codestyle: 17 | label: 0 |
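What the accumulator buys you, in plain TensorFlow (a minimal sketch, not the transformers implementation): sum gradients over several micro-batches, then apply them once:

import tensorflow as tf

def train_with_accumulation(model, optimizer, batches, accum_steps=3):
    # running sum of per-batch gradients
    accum = [tf.zeros_like(v) for v in model.trainable_variables]
    for step, (x, y) in enumerate(batches, start=1):
        with tf.GradientTape() as tape:
            loss = tf.reduce_mean(tf.square(y - model(x)))
        grads = tape.gradient(loss, model.trainable_variables)
        accum = [a + g for a, g in zip(accum, grads)]
        if step % accum_steps == 0:
            # apply the summed gradient once, then reset -- the effect the
            # GradientAccumulator tests above verify under MirroredStrategy
            optimizer.apply_gradients(zip(accum, model.trainable_variables))
            accum = [tf.zeros_like(v) for v in model.trainable_variables]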
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| code_codestyle: 275 |
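`pytest_addoption_shared` is assumed here to register the `--make-reports` flag consumed above; a hypothetical, minimal equivalent of that hook for illustration:

def pytest_addoption(parser):
    # sketch only: the real helper lives in diffusers.utils.testing_utils
    parser.addoption(
        "--make-reports", action="store", default=False,
        help="generate report files; the option value prefixes the report names",
    )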
from collections.abc import Callable


class Heap:
    """A generic heap with a position map, supporting O(log n) update and
    deletion of arbitrary items. Ordering follows the key function: the default
    keeps the largest value on top; pass key=lambda x: -x for a min-heap."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Swaps two elements, keeping the position map in sync."""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items' stored key values."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be the parent among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the value of the given item in the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item pair [item, key(value)] if the heap is non-empty."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns the top item pair and removes it from the heap, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Doctest examples for the Heap class (elided in this copy)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| style_context_codestyle: 275 | label: 1 |
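Usage sketch: the default ordering keeps the largest key on top, and updates re-position items in O(log n):

h = Heap()
h.insert_item(5, 34)
h.insert_item(6, 31)
h.insert_item(7, 37)
print(h.get_top())      # [7, 37]
h.update_item(6, 100)   # item 6 now dominates
print(h.extract_top())  # [6, 100]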
"""Tests for UniPCMultistepScheduler."""
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| code_codestyle: 368 |
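The interchangeability `test_switch` guards is what users rely on when swapping schedulers in a pipeline; a minimal sketch (model id illustrative):

from diffusers import DiffusionPipeline, UniPCMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# rebuild the scheduler from the pipeline's existing config
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)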
"""Conway's Game of Life, rendered frame by frame with Pillow."""
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generates a list of images of subsequent Game of Life states."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| style_context_codestyle: 368 | label: 1 |
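A quick sanity check with the blinker, which oscillates with period 2:

state1 = new_generation(BLINKER)
state2 = new_generation(state1)
assert state1 == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert state2 == BLINKER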
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| code_codestyle: 53 |
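The rule these tests encode, informally: every torch `.bin` weight needs a safetensors counterpart (per component, with variant fallbacks) for a repo to count as safetensors-compatible. Following the cases above:

assert is_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)
assert not is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])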
"""Fine-tune a timm ResNet on a pets image-classification dataset with 🤗 Accelerate,
including checkpointing, resumption, and experiment tracking."""
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
accelerator.log(
{
'''accuracy''': 1_00 * eval_metric,
'''train_loss''': total_loss.item() / len(__A ),
'''epoch''': epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
a_ : Dict = f'epoch_{epoch}'
if args.output_dir is not None:
a_ : int = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
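# For reference, a standalone sketch of the channel-wise normalization used in the loop above:
# reshaping per-channel statistics to (1, C, 1, 1) lets them broadcast over a
# (batch, channel, height, width) image tensor. The numbers below are the common ImageNet
# statistics, used only for illustration; the script itself reads the real values from
# model.default_cfg.
import torch

_imagenet_mean = torch.tensor([0.485, 0.456, 0.406])[None, :, None, None]
_imagenet_std = torch.tensor([0.229, 0.224, 0.225])[None, :, None, None]

def normalize_batch(images):
    # images: float tensor of shape (batch, 3, height, width), values in [0, 1]
    return (images - _imagenet_mean) / _imagenet_std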
def main():
a_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=__A , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__A , default=__A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=__A , default=__A , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=__A , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=__A , default=__A , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=__A , default='''logs''' , help='''Location where to store experiment tracking logs and relevant project information.''' , )
    args = parser.parse_args()
    config = {'''lr''': 3E-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 2_24}
    training_function(config, args)
if __name__ == "__main__":
main()
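# For reference: the checkpoint-name parsing performed inside training_function, as a
# self-contained helper. Folders are assumed to follow the "epoch_{i}" / "step_{i}" naming
# convention used above; the helper name itself is hypothetical, not part of the script.
import os

def parse_checkpoint_name(path, steps_per_epoch):
    name = os.path.splitext(os.path.basename(path))[0]
    if "epoch" in name:
        # resume at the epoch after the saved one; no in-epoch offset
        return int(name.replace("epoch_", "")) + 1, None
    resume_step = int(name.replace("step_", ""))
    starting_epoch = resume_step // steps_per_epoch
    return starting_epoch, resume_step - starting_epoch * steps_per_epoch

# parse_checkpoint_name("step_250", steps_per_epoch=100) -> (2, 50)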
| 466 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__UpperCAmelCase = "facebook/wmt19-en-de"
__UpperCAmelCase = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__UpperCAmelCase = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__UpperCAmelCase = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
__UpperCAmelCase = tokenizer(["Making tiny model"], return_tensors="pt")
__UpperCAmelCase = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
__UpperCAmelCase = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
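# A hypothetical smoke test for the tiny checkpoint generated above, loading it back from the
# local "tiny-wmt19-en-de" directory. It only checks that the forward pass produces logits of
# the expected rank, not translation quality.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

def smoke_test(path="tiny-wmt19-en-de"):
    tok = FSMTTokenizer.from_pretrained(path)
    model = FSMTForConditionalGeneration.from_pretrained(path).float()  # back to fp32 for a CPU forward pass
    out = model(**tok(["Making tiny model"], return_tensors="pt"))
    assert out.logits.ndim == 3  # (batch, sequence, vocab)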
| 597 |
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
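# The matching server side is not part of this file; a minimal counterpart, assuming the same
# hostname/port (12312) convention as the client above, might look like this. It streams one
# file to the first client that connects.
import socket

def serve_file(filename, port=12312):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.sendall(chunk)
    conn.close()
    server.close()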
| 597 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCAmelCase = None
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class TaTokenizerFast ( PreTrainedTokenizerFast ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = TaTokenizer
snake_case_ = []
def __init__( self , a_=None , a_=None , a_="</s>" , a_="<unk>" , a_="<pad>" , a_=100 , a_=None , **a_ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowerCAmelCase : Optional[int] = [F'''<extra_id_{i}>''' for i in range(a_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowerCAmelCase : Union[str, Any] = len(set(filter(lambda a_ : bool("extra_id_" in str(a_ ) ) , a_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
a_ , tokenizer_file=a_ , eos_token=a_ , unk_token=a_ , pad_token=a_ , extra_ids=a_ , additional_special_tokens=a_ , **a_ , )
lowerCAmelCase : Any = vocab_file
lowerCAmelCase : Union[str, Any] = False if not self.vocab_file else True
lowerCAmelCase : Optional[Any] = extra_ids
@staticmethod
def _lowerCamelCase ( a_ , a_ , a_ ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowerCAmelCase : Any = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a_ , )
return max_model_length
def _lowerCamelCase ( self , a_ , a_ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(a_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Union[str, Any] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self , a_ , a_ = None ):
lowerCAmelCase : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowerCAmelCase : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self , a_ , a_ = None ):
lowerCAmelCase : Optional[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self ):
return list(
set(filter(lambda a_ : bool(re.search(r"<extra_id_\d+>" , a_ ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self ):
return [self.convert_tokens_to_ids(a_ ) for token in self.get_sentinel_tokens()]
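# A quick illustration of the sentinel tokens configured above: with the default extra_ids=100,
# <extra_id_0> ... <extra_id_99> sit at the top of the vocabulary and mark masked spans in T5's
# denoising objective. "t5-small" is just an illustrative checkpoint.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park").input_ids
print(tokenizer.convert_ids_to_tokens(ids))  # the sentinels survive the round trip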
| 525 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=False , a_=99 , a_=16 , a_=2 , a_=4 , a_=4 , a_="gelu" , a_=0.1 , a_=0.1 , a_=32 , a_=2 , a_=1 , a_=0 , a_=0.02 , ):
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Tuple = batch_size
lowerCAmelCase : Any = seq_length
lowerCAmelCase : int = is_training
lowerCAmelCase : int = use_labels
lowerCAmelCase : Any = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Optional[int] = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : int = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : List[Any] = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : Optional[int] = eos_token_id
lowerCAmelCase : Union[str, Any] = pad_token_id
lowerCAmelCase : Tuple = bos_token_id
lowerCAmelCase : Tuple = initializer_range
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCAmelCase : List[str] = shift_tokens_right(a_ , 1 , 2 )
lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=a_ , )
lowerCAmelCase : Tuple = prepare_blenderbot_inputs_dict(a_ , a_ , a_ )
return config, inputs_dict
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCamelCase ( self , a_ , a_ , a_ ):
lowerCAmelCase : Any = 20
lowerCAmelCase : int = model_class_name(a_ )
lowerCAmelCase : Tuple = model.encode(inputs_dict["input_ids"] )
lowerCAmelCase , lowerCAmelCase : List[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , a_ , a_ )
lowerCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowerCAmelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , a_ , decoder_attention_mask=a_ , past_key_values=a_ , decoder_position_ids=a_ , )
lowerCAmelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCAmelCase : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , a_ , decoder_attention_mask=a_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=a_ , )
lowerCAmelCase : int = model.decode(a_ , a_ )
lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def _lowerCamelCase ( self , a_ , a_ , a_ ):
lowerCAmelCase : Union[str, Any] = 20
lowerCAmelCase : Tuple = model_class_name(a_ )
lowerCAmelCase : Dict = model.encode(inputs_dict["input_ids"] )
lowerCAmelCase , lowerCAmelCase : str = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCAmelCase : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , a_ , a_ )
lowerCAmelCase : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Any = model.decode(
decoder_input_ids[:, :-1] , a_ , decoder_attention_mask=a_ , past_key_values=a_ , decoder_position_ids=a_ , )
lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCAmelCase : Any = model.decode(
decoder_input_ids[:, -1:] , a_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=a_ , decoder_position_ids=a_ , )
lowerCAmelCase : List[str] = model.decode(a_ , a_ , decoder_attention_mask=a_ )
lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class FlaxBlenderbotHeadTests ( unittest.TestCase ):
snake_case_ = 99
def _lowerCamelCase ( self ):
lowerCAmelCase : List[Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCAmelCase : Union[str, Any] = input_ids.shape[0]
lowerCAmelCase : Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = self._get_config_and_data()
lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration(a_ )
lowerCAmelCase : str = lm_model(input_ids=a_ )
lowerCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCAmelCase : int = FlaxBlenderbotForConditionalGeneration(a_ )
lowerCAmelCase : Optional[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
lowerCAmelCase : List[str] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
lowerCAmelCase : List[str] = lm_model(input_ids=a_ , decoder_input_ids=a_ )
lowerCAmelCase : List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase : int = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
lowerCAmelCase : Dict = shift_tokens_right(a_ , 1 , 2 )
lowerCAmelCase : Optional[Any] = np.equal(a_ , 1 ).astype(np.floataa ).sum()
lowerCAmelCase : Any = np.equal(a_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(a_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
snake_case_ = True
snake_case_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
snake_case_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[Any] = FlaxBlenderbotModelTester(self )
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(a_ , a_ , a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(a_ , a_ , a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : List[str] = self._prepare_for_class(a_ , a_ )
lowerCAmelCase : str = model_class(a_ )
@jax.jit
def encode_jitted(a_ , a_=None , **a_ ):
return model.encode(input_ids=a_ , attention_mask=a_ )
with self.subTest("JIT Enabled" ):
lowerCAmelCase : List[Any] = encode_jitted(**a_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCAmelCase : Tuple = encode_jitted(**a_ ).to_tuple()
self.assertEqual(len(a_ ) , len(a_ ) )
for jitted_output, output in zip(a_ , a_ ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : int = model_class(a_ )
lowerCAmelCase : Optional[int] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
lowerCAmelCase : Optional[Any] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(a_ , a_ , a_ ):
return model.decode(
decoder_input_ids=a_ , decoder_attention_mask=a_ , encoder_outputs=a_ , )
with self.subTest("JIT Enabled" ):
lowerCAmelCase : str = decode_jitted(**a_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCAmelCase : Optional[Any] = decode_jitted(**a_ ).to_tuple()
self.assertEqual(len(a_ ) , len(a_ ) )
for jitted_output, output in zip(a_ , a_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : List[str] = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase : Union[str, Any] = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase : Dict = model(a_ )
self.assertIsNotNone(a_ )
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
def _lowerCamelCase ( self ):
lowerCAmelCase : Dict = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
lowerCAmelCase : Any = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
lowerCAmelCase : Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=a_ )
lowerCAmelCase : List[str] = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
lowerCAmelCase : Any = ["Sam"]
lowerCAmelCase : int = tokenizer(a_ , return_tensors="jax" )
lowerCAmelCase : List[Any] = model.generate(**a_ , **a_ )
lowerCAmelCase : Optional[Any] = "Sam is a great name. It means \"sun\" in Gaelic."
lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(a_ , **a_ )
assert generated_txt[0].strip() == tgt_text
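# For reference, an implementation of shift_tokens_right that is consistent with the
# assertions in the shift test above (a sketch, not necessarily the library's exact code).
import numpy as np

def shift_tokens_right_ref(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # label positions masked with -100 fall back to the pad token
    return np.where(shifted == -100, pad_token_id, shifted)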
| 525 | 1 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowercase :
'''simple docstring'''
@property
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.get_dummy_input()
@property
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def snake_case_ ( self , _snake_case=True , _snake_case=False , _snake_case=False , _snake_case=False , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 4
UpperCAmelCase = 32
UpperCAmelCase = (32, 32)
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = torch.device(_snake_case )
UpperCAmelCase = (batch_size, num_channels) + sizes
UpperCAmelCase = randn_tensor(_snake_case , generator=_snake_case , device=_snake_case )
UpperCAmelCase = {'''hidden_states''': hidden_states}
if include_temb:
UpperCAmelCase = 128
UpperCAmelCase = randn_tensor((batch_size, temb_channels) , generator=_snake_case , device=_snake_case )
if include_res_hidden_states_tuple:
UpperCAmelCase = torch.manual_seed(1 )
UpperCAmelCase = (randn_tensor(_snake_case , generator=_snake_case , device=_snake_case ),)
if include_encoder_hidden_states:
UpperCAmelCase = floats_tensor((batch_size, 32, 32) ).to(_snake_case )
if include_skip_sample:
UpperCAmelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_snake_case , device=_snake_case )
return dummy_input
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
UpperCAmelCase = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def snake_case_ ( self , _snake_case ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase = self.block_class(**_snake_case )
unet_block.to(_snake_case )
unet_block.eval()
with torch.no_grad():
UpperCAmelCase = unet_block(**_snake_case )
if isinstance(_snake_case , _snake_case ):
UpperCAmelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
UpperCAmelCase = output[0, -1, -3:, -3:]
UpperCAmelCase = torch.tensor(_snake_case ).to(_snake_case )
assert torch_all_close(output_slice.flatten() , _snake_case , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase = self.block_class(**_snake_case )
model.to(_snake_case )
model.train()
UpperCAmelCase = model(**_snake_case )
if isinstance(_snake_case , _snake_case ):
UpperCAmelCase = output[0]
UpperCAmelCase = torch.device(_snake_case )
UpperCAmelCase = randn_tensor(output.shape , device=_snake_case )
UpperCAmelCase = torch.nn.functional.mse_loss(_snake_case , _snake_case )
loss.backward()
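# The dummy inputs above are reproducible because every tensor is drawn from an explicitly
# seeded generator, so each block under test sees identical data. A minimal illustration:
import torch

def seeded_noise(shape, seed=0):
    generator = torch.manual_seed(seed)
    return torch.randn(shape, generator=generator)

assert torch.equal(seeded_noise((2, 3)), seeded_noise((2, 3)))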
| 708 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None):
'''simple docstring'''
UpperCAmelCase = {}
if train_file is not None:
UpperCAmelCase = [train_file]
if eval_file is not None:
UpperCAmelCase = [eval_file]
if test_file is not None:
UpperCAmelCase = [test_file]
UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=A__ )
UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
UpperCAmelCase = features_name.pop(A__ )
UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
UpperCAmelCase = {label: i for i, label in enumerate(A__ )}
UpperCAmelCase = tokenizer.model_input_names
UpperCAmelCase = {}
if len(A__ ) == 1:
for k in files.keys():
UpperCAmelCase = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=A__ , max_length=A__ , padding='''max_length''' ) , batched=A__ , )
elif len(A__ ) == 2:
for k in files.keys():
UpperCAmelCase = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding='''max_length''' , ) , batched=A__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
UpperCAmelCase = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
UpperCAmelCase = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
UpperCAmelCase = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__magic_name__ = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(metadata={"""help""": """Which column contains the label"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """The path of the training file"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """The path of the development file"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """The path of the test file"""} )
__SCREAMING_SNAKE_CASE = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class ModelArguments :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def main():
'''simple docstring'''
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
def compute_metrics(A__: EvalPrediction ) -> Dict:
UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
UpperCAmelCase = TFTrainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(A__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(A__ )
return results
if __name__ == "__main__":
main()
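# A toy, self-contained version of the generator-to-tf.data pattern used in get_tfds above,
# written with output_signature instead of the older positional types/shapes arguments:
import tensorflow as tf

def _toy_gen():
    for ids, label in [([1, 2, 3], 0), ([4, 5], 1)]:
        yield {"input_ids": ids}, label

toy_ds = tf.data.Dataset.from_generator(
    _toy_gen,
    output_signature=(
        {"input_ids": tf.TensorSpec(shape=[None], dtype=tf.int32)},
        tf.TensorSpec(shape=[], dtype=tf.int32),
    ),
)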
| 391 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a_ :Optional[int] = logging.get_logger(__name__)
a_ :Union[str, Any] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = '''marian'''
lowerCamelCase : Tuple = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[Any] , _lowercase : str=5_81_01 , _lowercase : Union[str, Any]=None , _lowercase : Tuple=10_24 , _lowercase : List[Any]=12 , _lowercase : int=40_96 , _lowercase : int=16 , _lowercase : str=12 , _lowercase : List[str]=40_96 , _lowercase : Tuple=16 , _lowercase : List[Any]=0.0 , _lowercase : Any=0.0 , _lowercase : List[Any]=True , _lowercase : Dict=True , _lowercase : Union[str, Any]="gelu" , _lowercase : int=10_24 , _lowercase : Optional[Any]=0.1 , _lowercase : List[Any]=0.0 , _lowercase : Optional[int]=0.0 , _lowercase : str=0.02 , _lowercase : Tuple=5_81_00 , _lowercase : int=False , _lowercase : Any=5_81_00 , _lowercase : Tuple=0 , _lowercase : Tuple=0 , _lowercase : List[Any]=True , **_lowercase : int , ):
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = decoder_vocab_size or vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : int = d_model
SCREAMING_SNAKE_CASE__ : Tuple = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : int = encoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : int = dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE__ : List[str] = activation_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_function
SCREAMING_SNAKE_CASE__ : Tuple = init_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = use_cache
SCREAMING_SNAKE_CASE__ : str = encoder_layers
SCREAMING_SNAKE_CASE__ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : str = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
class lowercase ( _UpperCAmelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowercase__ ( self : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.num_layers
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE__ : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowercase__ ( self : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super(_lowercase , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_layers
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def lowercase__ ( self : Optional[Any] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE__ : str = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Generate decoder inputs
SCREAMING_SNAKE_CASE__ : str = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE__ : Optional[int] = dict(**_lowercase , **_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE__ : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : str = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowercase , _lowercase )] , dim=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.num_layers
SCREAMING_SNAKE_CASE__ : str = min(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = max(_lowercase , _lowercase ) - min_num_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowercase , _lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def lowercase__ ( self : int , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : Any = seqlen + 2
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Any = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
SCREAMING_SNAKE_CASE__ : List[str] = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def lowercase__ ( self : Optional[Any] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : str = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : str = tokenizer.num_special_tokens_to_add(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ : List[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(tokenizer(_lowercase , return_tensors=_lowercase ) )
return common_inputs
def lowercase__ ( self : List[str] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
return common_inputs
def lowercase__ ( self : str , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = super(_lowercase , self )._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase )
@property
def lowercase__ ( self : Dict ):
return 1E-4
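# The dummy-input helpers above resolve dynamic (-1) axes through
# compute_effective_axis_dimension; its observable behavior amounts to this sketch:
def effective_axis_dimension(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:  # dynamic axis: fall back to the fixed default
        dimension = fixed_dimension
    return dimension - num_token_to_add

# e.g. a dynamic sequence axis with 2 special tokens to add:
assert effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6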
| 35 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __a :
def __init__( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]=None , snake_case_ : str="resnet50" , snake_case_ : List[Any]=3 , snake_case_ : Optional[int]=32 , snake_case_ : Union[str, Any]=3 , snake_case_ : Tuple=True , snake_case_ : List[str]=True , )-> Optional[Any]:
__lowerCAmelCase =parent
__lowerCAmelCase =out_indices if out_indices is not None else [4]
__lowerCAmelCase =stage_names
__lowerCAmelCase =out_features
__lowerCAmelCase =backbone
__lowerCAmelCase =batch_size
__lowerCAmelCase =image_size
__lowerCAmelCase =num_channels
__lowerCAmelCase =use_pretrained_backbone
__lowerCAmelCase =is_training
def UpperCamelCase ( self : int)-> Dict:
__lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCAmelCase =self.get_config()
return config, pixel_values
def UpperCamelCase ( self : Optional[int])-> Optional[int]:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def UpperCamelCase ( self : str , snake_case_ : int , snake_case_ : Union[str, Any])-> str:
__lowerCAmelCase =TimmBackbone(config=snake_case_)
model.to(snake_case_)
model.eval()
with torch.no_grad():
__lowerCAmelCase =model(snake_case_)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def UpperCamelCase ( self : List[str])-> Union[str, Any]:
__lowerCAmelCase =self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase =config_and_inputs
__lowerCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self : Union[str, Any])-> str:
__lowerCAmelCase =TimmBackboneModelTester(self)
__lowerCAmelCase =ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_)
def UpperCamelCase ( self : Tuple)-> Optional[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self : Any)-> Dict:
__lowerCAmelCase ="""resnet18"""
__lowerCAmelCase ="""microsoft/resnet-18"""
__lowerCAmelCase =AutoBackbone.from_pretrained(snake_case_ , use_timm_backbone=snake_case_)
__lowerCAmelCase =AutoBackbone.from_pretrained(snake_case_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
__lowerCAmelCase =AutoBackbone.from_pretrained(snake_case_ , use_timm_backbone=snake_case_ , out_indices=[1, 2, 3])
__lowerCAmelCase =AutoBackbone.from_pretrained(snake_case_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""")
def UpperCamelCase ( self : Dict)-> Any:
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""")
def UpperCamelCase ( self : Tuple)-> Dict:
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""")
def UpperCamelCase ( self : Union[str, Any])-> List[str]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""")
def UpperCamelCase ( self : List[str])-> List[str]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""")
def UpperCamelCase ( self : int)-> int:
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""")
def UpperCamelCase ( self : Dict)-> int:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
def UpperCamelCase ( self : Any)-> Dict:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""")
def UpperCamelCase ( self : int)-> Tuple:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""")
def UpperCamelCase ( self : Optional[int])-> Union[str, Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
def UpperCamelCase ( self : Optional[Any])-> Optional[Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
def UpperCamelCase ( self : Any)-> List[Any]:
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""")
def UpperCamelCase ( self : Any)-> Tuple:
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""")
def UpperCamelCase ( self : List[Any])-> str:
pass
@unittest.skip("""Safetensors is not supported by timm.""")
def UpperCamelCase ( self : Dict)-> Optional[int]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def UpperCamelCase ( self : Optional[int])-> Tuple:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 354 | 0 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return the Open Library JSON record for the given olid (e.g. 'isbn/...' or 'authors/...')."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
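# Example (requires network access; 0140328726 should resolve to Roald Dahl's "Matilda"):
#     get_openlibrary_data("isbn/0140328726")["title"]  # -> "Matilda"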
def summarize_book(ol_book_data: dict) -> dict:
    """Given the JSON record of a book, return a human-readable dict of its summary fields."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('''\n'''.join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 505 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Walk unvisited edges depth-first, marking each undirected edge as used in both directions."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Return (1, _) for an Euler circuit, (2, start_node) for an Euler path, (3, _) for neither."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Print whether the graph has an Euler cycle or path and, if one exists, a traversal."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
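    # Each edge is marked visited exactly once in dfs, so the traversal is O(V + E);
    # the visited_edge matrix allocated in check_euler costs O(max_node**2) memory.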
| 505 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
) | 108 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution followed by max pooling."""

    def __init__(self, config: ResNetConfig) -> None:
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """ResNet shortcut: projects the residual features to the correct size and optionally downsamples."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """A classic ResNet basic layer composed of two 3x3 convolutions and a residual connection."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu") -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """A classic ResNet bottleneck layer: 1x1 reduce, 3x3, 1x1 expand, with a residual connection."""

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked basic or bottleneck layers."""

    def __init__(
        self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2
    ) -> None:
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module) -> None:
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False) -> None:
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
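# Minimal usage sketch (not part of the original file; assumes the public transformers API and
# the documented checkpoint/output shape above):
#
#     from transformers import AutoImageProcessor, ResNetModel
#     import torch
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetModel.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")  # `image` is any PIL image
#     with torch.no_grad():
#         outputs = model(**inputs)
#     list(outputs.last_hidden_state.shape)  # -> [1, 2048, 7, 7]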
| 701 | """simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register the error to raise when the optional dependency of a format type is missing."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that returns a Formatter for the given type, raising a helpful error if its dependency is unavailable."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'")
| 121 | 0 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class Queue(Generic[_T]):
    """FIFO queue built from two LIFO stacks; each element is moved at most twice, so operations are amortized O(1)."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Bind the hot methods once to avoid repeated attribute lookups in the loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
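    # FIFO order is preserved even though both internal containers are LIFO stacks:
    queue = Queue([1, 2, 3])
    queue.put(4)
    assert queue.get() == 1 and queue.get() == 2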
| 29 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
A_ = logging.get_logger(__name__)
A_ = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    """A Trainer subclass for seq2seq models, adding label smoothing and generative evaluation."""

    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding..")

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Setup the optimizer and the learning rate scheduler."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ):
        inputs = self._prepare_inputs(inputs)
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs)
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}")
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
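# Usage sketch (hypothetical surrounding script; the class only adds `config`/`data_args`
# on top of the standard `Trainer` signature):
#
#     trainer = Seq2SeqTrainer(config=model.config, data_args=data_args, model=model,
#                              args=training_args, train_dataset=train_dataset)
#     trainer.train()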
| 609 | 0 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess: `log` takes an extra `main_process_only`
    kwarg that restricts the call to the main process, and `in_order` to log rank by rank.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates the logger call after checking if we should log.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` whose calls can be restricted to the main process.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
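# Usage sketch (`main_process_only` and `in_order` are the extra kwargs handled by
# MultiProcessAdapter.log above):
#
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed on the main process only")
#     logger.info("printed by every rank, in rank order", main_process_only=False, in_order=True)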
| 509 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" CamemBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 509 | 1 |
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` Bernoulli trials with success probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print("Probability of 2 successes out of 4 trials")
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 91 |
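    # Hand check: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12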
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase =TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
UpperCamelCase =[]
UpperCamelCase =[]
UpperCamelCase ={"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
UpperCamelCase =[
{
"type": "header",
"text": {
"type": "plain_text",
"text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"emoji": True,
},
}
]
UpperCamelCase =0
for log in Path().glob("*.log"):
UpperCamelCase =0
with open(log, "r") as f:
for line in f:
UpperCamelCase =json.loads(line)
if line.get("nodeid", "") != "":
UpperCamelCase =line["nodeid"]
if line.get("duration", None) is not None:
UpperCamelCase =f"{line['duration']:.4f}"
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase =[]
log.unlink()
UpperCamelCase =""
UpperCamelCase =[]
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase =[]
UpperCamelCase ={}
for test in failed_tests:
UpperCamelCase =test[0].split("::")
UpperCamelCase =data[0].split("/")[-1]
if data[0] not in filesafailed:
UpperCamelCase =[data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase =[test[0] for test in failed_table]
UpperCamelCase =list(set(files))
# Count number of instances in failed_tests
UpperCamelCase =[]
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase =tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
UpperCamelCase ="Too many failed tests, please see the full report in the Action results."
UpperCamelCase =len(err) + 10
UpperCamelCase =message[: 3000 - offset] + f"\n...\n```\n{err}"
print(f"### {message}")
else:
UpperCamelCase ="No failed tests! 🤗"
print(f"## {message}")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
UpperCamelCase =WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
UpperCamelCase ={
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
UpperCamelCase ={
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
UpperCamelCase ={
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
UpperCamelCase =client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
UpperCamelCase =response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase =""
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase =row[0]
else:
UpperCamelCase =""
UpperCamelCase ={
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 208 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
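# Registering the `_LazyModule` above in `sys.modules` defers the torch-dependent imports
# until an attribute such as `PegasusXModel` is first accessed.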
| 519 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP, and
    scales/unscales image embeddings accordingly.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 519 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 19 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'wavlm'
def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.02 , __a=1e-5 , __a="group" , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=1_28 , __a=16 , __a=3_20 , __a=8_00 , __a=False , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=3_20 , __a=2 , __a=0.1 , __a=1_00 , __a=2_56 , __a=2_56 , __a=0.1 , __a="mean" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=80 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a)
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = conv_bias
_UpperCamelCase = num_buckets
_UpperCamelCase = max_bucket_distance
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim)
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = num_ctc_classes
_UpperCamelCase = vocab_size
_UpperCamelCase = do_stable_layer_norm
_UpperCamelCase = use_weighted_layer_sum
_UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCamelCase = num_codevectors_per_group
_UpperCamelCase = num_codevector_groups
_UpperCamelCase = contrastive_logits_temperature
_UpperCamelCase = num_negatives
_UpperCamelCase = codevector_dim
_UpperCamelCase = proj_codevector_dim
_UpperCamelCase = diversity_loss_weight
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# adapter
_UpperCamelCase = add_adapter
_UpperCamelCase = adapter_kernel_size
_UpperCamelCase = adapter_stride
_UpperCamelCase = num_adapter_layers
_UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
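    # With the default conv_stride (5, 2, 2, 2, 2, 2, 2), the product above is 5 * 2**6 = 320,
    # i.e. one output frame of the feature extractor per 320 input audio samples.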
| 19 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_id_generation(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 706 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(self, num_latents=256, d_latents=1_280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2_048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1_920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 440 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
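# Added note: with the merges above, "adapt" and "readapt" reduce to whole-word pieces
# ("adapt", "re@@ adapt"), while "react" has no merge past "r e", so it splits into
# "re@@ a@@ c@@ t" -- exactly the expectation asserted in test_full_tokenizer.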
| 43 |
def solution(n: int = 100) -> int:
    """Returns the number of distinct terms generated by a**b for 2 <= a, b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 488 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camelcased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    # The first column holds the model names, so it needs to be wider.
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in the index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
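# Added note: per the header comment, this script is meant to be run from the repo root,
# either as a consistency check or with the fixer flag:
#   python utils/check_table.py
#   python utils/check_table.py --fix_and_overwrite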
| 702 |
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Returns the next number in the chain: the sum of the squares of the digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89
def chain(number: int) -> bool:
    """Returns True if the chain of `number` ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Counts how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
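# Worked example (added): next_number(86) == 8**2 + 6**2 == 100 and 100 -> 1, so
# chain(86) is True (the chain ends at 1). Among 1..10 only 2, 3, 4, 5, 6, 8 and 9
# arrive at 89, so solution(10) == 7.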
| 406 | 0 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_dropout=None, output_layers=None, regression=False):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output
        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
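# Hedged usage sketch (added): at inference time, patience-based early exit is enabled
# on the wrapped encoder before running the classification head:
#   model = BertForSequenceClassificationWithPabee.from_pretrained(checkpoint_dir)  # hypothetical path
#   model.bert.set_patience(3)
#   model.bert.reset_stats()
#   logits = model(input_ids=batch_input_ids)[0]
#   model.bert.log_stats()  # prints the average number of layers actually executed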
| 70 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
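# Hedged usage sketch (added; model id is the standard public CLIP checkpoint):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # batch carries input_ids/attention_mask from the tokenizer plus pixel_values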
| 26 | 0 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 705 |
'''simple docstring'''
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
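# Illustrative self-check (added): the sort is in place, so the list itself is
# reordered after the call.
def _demo_rec_insertion_sort():
    data = [5, 3, 1, 4, 2]
    rec_insertion_sort(data, len(data))
    assert data == [1, 2, 3, 4, 5]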
| 513 | 0 |
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Returns the product of all digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Returns the largest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 446 |
'''simple docstring'''
def lowerCamelCase__ ( __lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =set()
# Replace all the whitespace in our sentence
_UpperCAmelCase : Dict =input_str.replace(' ' , '' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__lowerCamelCase ) == 2_6
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark():
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
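# Illustrative self-check (added): all three variants agree on these inputs.
def _demo_is_pangram():
    assert is_pangram("The quick brown fox jumps over the lazy dog")
    assert is_pangram_fastest("The quick brown fox jumps over the lazy dog")
    assert not is_pangram("My name is Unknown")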
| 446 | 1 |
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
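# Worked examples (added): shear_stress(stress=25, tangential_force=100, area=0)
# solves for the missing area ("area", 100 / 25 == 4.0), while
# shear_stress(stress=0, tangential_force=25, area=100) returns ("stress", 0.25).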
| 207 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 207 | 1 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w
    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
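# Illustrative self-check (added): mirrors test_sha1_hash for a second input.
def _demo_sha1():
    msg = b"hello world"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324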
| 132 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping track of active patches."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
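# Hedged usage sketch (added; `mod` is a hypothetical module that imported os):
#   with patch_submodule(mod, "os.path.join", lambda *parts: "/".join(parts)):
#       mod.build_some_path()  # sees the patched join
#   # on exit the original os.path.join is restored via __exit__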
| 79 | 0 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
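# Illustrative self-checks (added): expected values follow directly from the
# definition of is_balanced above.
def _demo_is_balanced():
    assert is_balanced("{[()]}")
    assert not is_balanced("{[(])}")
    assert is_balanced("")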
| 715 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 184 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)
    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
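# Hedged usage sketch (added; model id comes from the pretrained map above):
#   tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   ids = tokenizer("MKTAYIAKQR")["input_ids"]  # one token per residue plus <cls>/<eos>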
| 366 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )
    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 366 | 1 |
import base64


def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
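# Illustrative self-check (added): Ascii85 round-trip.
def _demo_base85():
    sample = "some text to round-trip"
    assert base85_decode(base85_encode(sample)) == sample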
| 719 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def should_ignore(name , ignore_keys ):
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights(fairseq_dict , hf_model , task ):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f"""{name} was ignored""" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split(""".*.""" )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint(task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(f"""Unknown task name: {task}""" )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
        feature_extractor = SpeechTaFeatureExtractor()
        processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["""model"""] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
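# Illustration (not part of the original script): how the "*" wildcard keys in the
# MAPPING_* dicts above are resolved, mirroring the prefix/suffix logic inside
# recursively_load_weights. The fairseq key in the commented call is a hypothetical example.
def _resolve_wildcard_example(key, mapped_key, name):
    prefix, suffix = key.split(".*.")
    if prefix in name and suffix in name:
        layer_index = name.split(suffix)[0].split(".")[-2]  # e.g. "3" in "encoder.layers.3.fc1"
        return mapped_key.replace("*", layer_index)
    return None
# _resolve_wildcard_example(
#     "encoder.layers.*.fc1",
#     "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
#     "encoder.layers.3.fc1",
# )  # -> "speecht5.encoder.wrapped_encoder.layers.3.feed_forward.intermediate_dense"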
| 507 | 0 |
from torch import nn
def get_activation(act_fn ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
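# Usage sketch for the helper above (`get_activation` is the restored diffusers-style
# name; the string-to-module mapping is exactly what the function encodes):
# get_activation("silu")  # -> nn.SiLU()
# get_activation("gelu")  # -> nn.GELU()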
| 171 |
import random
from typing import Any
def fisher_yates_shuffle(data: list ) -> list:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
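# Note: the implementation above swaps two independently chosen indices on each
# pass, which is not the classic Fisher-Yates procedure and does not produce a
# uniform random permutation. A minimal sketch of the textbook variant, for
# comparison (uses the same `random` import):
def fisher_yates_shuffle_classic(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick only from the not-yet-fixed prefix [0, i]
        data[i], data[j] = data[j], data[i]
    return data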
| 171 | 1 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str ) -> str:
    """Strip the filesystem protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem ) -> bool:
    """Return True if `fs` is a remote filesystem, i.e. its protocol is not "file"."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem , src: str , dst: str ) -> None:
    """Move `src` to `dst`, using a plain filesystem move when `fs` is local."""
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock() -> None:
    """Clear fsspec's async loop/thread references so they can be recreated safely."""
    if hasattr(fsspec.asyn , "reset_lock" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
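# Usage sketch for the helpers above (paths are illustrative):
# extract_path_from_uri("s3://my-bucket/datasets/train")  # -> "my-bucket/datasets/train"
# extract_path_from_uri("/local/datasets/train")          # -> unchanged, no "://" present
# import fsspec
# is_remote_filesystem(fsspec.filesystem("file"))         # -> False ("file" protocol is local)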
| 323 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swinv2"""] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
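# For context, a stripped-down sketch of the idea behind _LazyModule (the real
# implementation lives in transformers.utils): heavy submodules are imported only
# when one of their symbols is first accessed.
import importlib
class _LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }
    def __getattr__(self, symbol):
        module = importlib.import_module(f"{self._name}.{self._symbol_to_module[symbol]}")
        return getattr(module, symbol)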
| 323 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""tpu-config""" , description=_description )
    else:
        parser = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
    config_args = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=_lowerCAmelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=_lowerCAmelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
    pod_args = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=_lowerCAmelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher(args ):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = """git+https://github.com/huggingface/accelerate.git"""
    elif args.accelerate_version == "latest":
        args.accelerate_version = """accelerate -U"""
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
    if args.command_file:
        with open(args.command_file , """r""" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["""cd /usr/share"""]
    if args.install_accelerate:
        new_cmd += [f"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = """; """.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
        print(f"""Running {" ".join(cmd )}""" )
return
    subprocess.run(cmd )
print("""Successfully setup pod.""" )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
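# For illustration, with hypothetical arguments --tpu_name my-tpu --tpu_zone
# us-central2-b and a single command "python train.py", the subprocess call
# above amounts to:
# gcloud compute tpus tpu-vm ssh my-tpu --zone us-central2-b \
#     --command "cd /usr/share; python train.py" --worker all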
| 465 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time , burst_time , no_of_processes ):
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time , no_of_processes , waiting_time ):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
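# Hand-checked expectation for the test case above: with all arrivals at t=0 the
# non-preemptive shortest-job-first order is P1 (2), P3 (3), P2 (5), P4 (7),
# so waiting_time == [0, 5, 2, 10] and turn_around_time == [2, 10, 5, 17],
# i.e. mean waiting 4.25 and mean turnaround 8.5.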
| 465 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__: int = tempfile.mkdtemp()
lowerCamelCase__: Tuple = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
lowerCamelCase__: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__: Optional[int] = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
lowerCamelCase__: List[str] = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__a , __a )
def lowerCamelCase_ ( self : List[Any] , **__a : List[str] ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCamelCase_ ( self : int , **__a : str ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def lowerCamelCase_ ( self : str , **__a : Tuple ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowerCamelCase__: Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase__: Dict = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: Optional[int] = self.get_tokenizer()
lowerCamelCase__: List[Any] = self.get_rust_tokenizer()
lowerCamelCase__: Dict = self.get_image_processor()
lowerCamelCase__: int = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase__: Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
lowerCamelCase__: str = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase__: Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: str = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__: List[str] = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
lowerCamelCase__: str = self.get_image_processor(do_normalize=__a )
lowerCamelCase__: List[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=__a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: str = self.get_image_processor()
lowerCamelCase__: Union[str, Any] = self.get_tokenizer()
lowerCamelCase__: List[str] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: int = self.prepare_image_inputs()
lowerCamelCase__: List[str] = image_processor(__a , return_tensors="""np""" )
lowerCamelCase__: Optional[Any] = processor(images=__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__: Tuple = self.get_image_processor()
lowerCamelCase__: Dict = self.get_tokenizer()
lowerCamelCase__: Optional[int] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: List[str] = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase__: Optional[Any] = processor(text=__a )
lowerCamelCase__: Tuple = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: int = self.get_image_processor()
lowerCamelCase__: Any = self.get_tokenizer()
lowerCamelCase__: Tuple = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: Dict = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase__: int = self.prepare_image_inputs()
lowerCamelCase__: Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: Optional[Any] = self.get_image_processor()
lowerCamelCase__: List[Any] = self.get_tokenizer()
lowerCamelCase__: Optional[int] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__: Union[str, Any] = processor.batch_decode(__a )
lowerCamelCase__: Union[str, Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: str = self.get_image_processor()
lowerCamelCase__: Tuple = self.get_tokenizer()
lowerCamelCase__: Union[str, Any] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: Optional[int] = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase__: Tuple = self.prepare_image_inputs()
lowerCamelCase__: Optional[int] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 242 | 0 |
from math import pow
def backtrack(needed_sum , power , current_number , current_sum , solutions_count , ):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count
def solve(needed_sum , power ):
    if not (1 <= needed_sum <= 1_0_0_0 and 2 <= power <= 1_0):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.' )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
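# Usage sketch: solve(n, p) counts the ways to write n as a sum of distinct
# p-th powers of positive integers, e.g.:
# solve(13, 2)  # -> 1  (13 = 2**2 + 3**2)
# solve(10, 2)  # -> 1  (10 = 1**2 + 3**2)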
| 101 |
from statistics import mean, stdev
def normalization(data: list , ndigits: int = 3 ) -> list:
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization(data: list , ndigits: int = 3 ) -> list:
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
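# Usage sketch, values hand-computed for min-max scaling with ndigits=3:
# normalization([2, 7, 10, 20, 30, 50])
# -> [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]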
| 311 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast ):
    slow_tokenizer_class = CustomTokenizer
    pass
| 620 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class TextQuestionAnsweringToolTester(unittest.TestCase , ToolTesterMixin ):
    def setUp(self ):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering' , remote=True )
    def test_exact_match_arg(self ):
        result = self.tool(TEXT , 'What did Hugging Face do in April 2021?')
        self.assertEqual(result , 'launched the BigScience Research Workshop')
    def test_exact_match_arg_remote(self ):
        result = self.remote_tool(TEXT , 'What did Hugging Face do in April 2021?')
        self.assertEqual(result , 'launched the BigScience Research Workshop')
    def test_exact_match_kwarg(self ):
        result = self.tool(text=TEXT , question='What did Hugging Face do in April 2021?')
        self.assertEqual(result , 'launched the BigScience Research Workshop')
    def test_exact_match_kwarg_remote(self ):
        result = self.remote_tool(text=TEXT , question='What did Hugging Face do in April 2021?')
        self.assertEqual(result , 'launched the BigScience Research Workshop')
| 620 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A ( UpperCamelCase_ ):
UpperCamelCase__ : List[str] =(PNDMScheduler,)
UpperCamelCase__ : Dict =(('num_inference_steps', 50),)
def lowerCamelCase ( self : Dict , **lowercase_ : Dict ) -> str:
"""simple docstring"""
_lowerCamelCase : List[Any] ={
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowercase_ )
return config
def lowerCamelCase ( self : Any , lowercase_ : str=0 , **lowercase_ : str ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =dict(self.forward_default_kwargs )
_lowerCamelCase : Optional[Any] =kwargs.pop('num_inference_steps' , lowercase_ )
_lowerCamelCase : Tuple =self.dummy_sample
_lowerCamelCase : int =0.1 * sample
_lowerCamelCase : int =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : str =self.get_scheduler_config(**lowercase_ )
_lowerCamelCase : str =scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
_lowerCamelCase : Any =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
_lowerCamelCase : Optional[Any] =scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
_lowerCamelCase : Any =dummy_past_residuals[:]
_lowerCamelCase : Union[str, Any] =scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
_lowerCamelCase : List[str] =new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_lowerCamelCase : Optional[int] =scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
_lowerCamelCase : Optional[Any] =new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any] , lowercase_ : List[str]=0 , **lowercase_ : int ) -> int:
"""simple docstring"""
_lowerCamelCase : Any =dict(self.forward_default_kwargs )
_lowerCamelCase : Dict =kwargs.pop('num_inference_steps' , lowercase_ )
_lowerCamelCase : Union[str, Any] =self.dummy_sample
_lowerCamelCase : Optional[int] =0.1 * sample
_lowerCamelCase : List[str] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : Any =self.get_scheduler_config()
_lowerCamelCase : int =scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCamelCase : Any =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
_lowerCamelCase : Optional[Any] =scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
_lowerCamelCase : Optional[int] =dummy_past_residuals[:]
_lowerCamelCase : List[str] =scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
_lowerCamelCase : str =new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_lowerCamelCase : Optional[Any] =scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
_lowerCamelCase : List[str] =new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Optional[Any] , **lowercase_ : Any ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] =self.get_scheduler_config(**lowercase_ )
_lowerCamelCase : Any =scheduler_class(**lowercase_ )
_lowerCamelCase : Union[str, Any] =10
_lowerCamelCase : str =self.dummy_model()
_lowerCamelCase : Union[str, Any] =self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowerCamelCase : Union[str, Any] =model(lowercase_ , lowercase_ )
_lowerCamelCase : Any =scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowerCamelCase : List[Any] =model(lowercase_ , lowercase_ )
_lowerCamelCase : int =scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def lowerCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Any =dict(self.forward_default_kwargs )
_lowerCamelCase : Optional[int] =kwargs.pop('num_inference_steps' , lowercase_ )
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : List[str] =self.get_scheduler_config()
_lowerCamelCase : List[Any] =scheduler_class(**lowercase_ )
_lowerCamelCase : Union[str, Any] =self.dummy_sample
_lowerCamelCase : Optional[int] =0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , 'set_timesteps' ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , 'set_timesteps' ):
_lowerCamelCase : Tuple =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCamelCase : List[Any] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowerCamelCase : Union[str, Any] =dummy_past_residuals[:]
_lowerCamelCase : List[Any] =scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
_lowerCamelCase : str =scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_lowerCamelCase : Tuple =scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
_lowerCamelCase : Optional[Any] =scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def lowerCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
_lowerCamelCase : Optional[int] =self.scheduler_classes[0]
_lowerCamelCase : Dict =self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : List[Any] =scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def lowerCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def lowerCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def lowerCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def lowerCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def lowerCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] =27
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : Dict =self.dummy_sample
_lowerCamelCase : List[Any] =0.1 * sample
_lowerCamelCase : List[Any] =self.get_scheduler_config()
_lowerCamelCase : List[str] =scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowerCamelCase : Tuple =scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def lowerCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(lowercase_ ):
_lowerCamelCase : Dict =self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] =self.get_scheduler_config()
_lowerCamelCase : List[Any] =scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def lowerCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : Tuple =self.full_loop()
_lowerCamelCase : Optional[Any] =torch.sum(torch.abs(lowercase_ ) )
_lowerCamelCase : Union[str, Any] =torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def lowerCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : int =self.full_loop(prediction_type='v_prediction' )
_lowerCamelCase : Dict =torch.sum(torch.abs(lowercase_ ) )
_lowerCamelCase : str =torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def lowerCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple =self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
_lowerCamelCase : Optional[Any] =torch.sum(torch.abs(lowercase_ ) )
_lowerCamelCase : Union[str, Any] =torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def lowerCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_lowerCamelCase : str =self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
_lowerCamelCase : Union[str, Any] =torch.sum(torch.abs(lowercase_ ) )
_lowerCamelCase : str =torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
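# A minimal usage sketch of the scheduler under test (shapes illustrative; a real
# pipeline would use a trained UNet instead of the random stand-in below):
# import torch
# from diffusers import PNDMScheduler
# scheduler = PNDMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# sample = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     model_output = torch.randn(1, 3, 32, 32)  # stand-in for unet(sample, t).sample
#     sample = scheduler.step(model_output, t, sample).prev_sample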
| 464 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'prophetnet.tokenizer'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
PRETRAINED_INIT_CONFIGURATION = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/xprophetnet-large-wiki100-cased': 5_12,
}
def load_vocab(vocab_file ):
    '''Loads a vocabulary file into an ordered dictionary.'''
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class A ( UpperCamelCase_ ):
UpperCamelCase__ : str =VOCAB_FILES_NAMES
UpperCamelCase__ : int =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : int =['input_ids', 'attention_mask']
def __init__( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str]="[SEP]" , lowercase_ : Any="[SEP]" , lowercase_ : Optional[int]="[SEP]" , lowercase_ : Optional[Any]="[UNK]" , lowercase_ : int="[PAD]" , lowercase_ : Union[str, Any]="[CLS]" , lowercase_ : Optional[int]="[MASK]" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : str , ) -> None:
"""simple docstring"""
_lowerCamelCase : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
_lowerCamelCase : Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
_lowerCamelCase : Dict =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
_lowerCamelCase : int ={'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10 ):
_lowerCamelCase : Dict =F'''[unused{i}]'''
_lowerCamelCase : List[str] =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
_lowerCamelCase : Any =12
_lowerCamelCase : Dict ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(lowercase_ )
def __getstate__( self : int ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Dict =self.__dict__.copy()
_lowerCamelCase : Dict =None
return state
def __setstate__( self : str , lowercase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCamelCase : Any ={}
_lowerCamelCase : Optional[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return ([0] * len(lowercase_ )) + [1]
return ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1]
def lowerCamelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_lowerCamelCase : Any =[self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset
def lowerCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[Any] ={self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase ( self : List[Any] , lowercase_ : str ) -> str:
"""simple docstring"""
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def lowerCamelCase ( self : List[str] , lowercase_ : Dict ) -> str:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase : Optional[int] =self.sp_model.PieceToId(lowercase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase ( self : str , lowercase_ : Dict ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase ( self : Any , lowercase_ : List[str] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : int =''.join(lowercase_ ).replace(lowercase_ , ' ' ).strip()
return out_string
def lowerCamelCase ( self : int , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCamelCase : Dict =os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
_lowerCamelCase : Dict =self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
def lowerCamelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
_lowerCamelCase : Any =[self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
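    # Worked example of the id offset used above: SentencePiece assigns "," the
    # piece id 3; with fairseq_offset = 12 the tokenizer exposes it as 3 + 12 = 15,
    # the embedding-vocab position mentioned in the __init__ comment. Ids 0-4 are
    # the special tokens and 5-14 the [unused] slots registered in __init__.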
| 464 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 * 8 , _SCREAMING_SNAKE_CASE=32 * 8 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=64 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = is_training
_UpperCAmelCase = use_auxiliary_loss
_UpperCAmelCase = num_queries
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_size
_UpperCAmelCase = max_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = hidden_dim
def UpperCAmelCase ( self ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE ) > 0.5
).float()
_UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_SCREAMING_SNAKE_CASE ) > 0.5).long()
_UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase ( self ):
_UpperCAmelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase = self.num_queries
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = [1, 1, 1, 1]
_UpperCAmelCase = self.num_channels
_UpperCAmelCase = 64
_UpperCAmelCase = 128
_UpperCAmelCase = self.hidden_dim
_UpperCAmelCase = self.hidden_dim
_UpperCAmelCase = self.hidden_dim
return config
def UpperCAmelCase ( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(transformer_decoder_hidden_states ) , config.decoder_layers )
    def create_and_check_maskaformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskaFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskaformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskaFormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
        comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
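    # For concreteness with the defaults above (batch_size=2, num_queries=10,
    # num_labels=4, 256x256 inputs): masks_queries_logits comes out as (2, 10, 64, 64)
    # -- the 1/4-resolution mask logits -- and class_queries_logits as (2, 10, 5),
    # where the extra class is the "no object" (null) class.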
@require_torch
class _A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
def UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
    def test_maskaformer_model( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=False )
    def test_maskaformer_instance_segmentation_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase ( self ):
pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
    def test_model_from_pretrained( self ):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_model_with_labels( self ):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            """pixel_values""": torch.randn((2, 3, *size) , device=torch_device ),
            """mask_labels""": torch.randn((2, 10, *size) , device=torch_device ),
            """class_labels""": torch.zeros(2 , 10 , device=torch_device ).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=True )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self ):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config ).to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@slow
class _A ( unittest.TestCase ):
    @cached_property
    def model_checkpoints( self ):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor( self ):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
    def test_inference_no_head( self ):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
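    # Why the %32 check above holds: the Swin backbone patchifies by a factor of 4 and
    # then halves the resolution in each of three stages, an overall stride of 32, so
    # input height and width must be multiples of 32 for the feature maps to line up.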
    def test_inference_universal_segmentation_head( self ):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_with_segmentation_maps_and_loss( self ):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="""pt""" , )
        pixel_values = inputs["""pixel_values"""].to(torch_device )
        mask_labels = [el.to(torch_device ) for el in inputs["""mask_labels"""]]
        class_labels = [el.to(torch_device ) for el in inputs["""class_labels"""]]
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values , mask_labels=mask_labels , class_labels=class_labels )
self.assertTrue(outputs.loss is not None ) | 175 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_json_keep_in_memory( keep_in_memory , jsonl_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_json_features( features , jsonl_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path , features=features , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> int:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader(snake_case , features=snake_case , cache_dir=snake_case ).read()
assert isinstance(snake_case , snake_case )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Optional[int]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
_UpperCAmelCase = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
_UpperCAmelCase = features.copy()
_UpperCAmelCase = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = JsonDatasetReader(snake_case , features=snake_case , cache_dir=snake_case ).read()
assert isinstance(snake_case , snake_case )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> List[Any]:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case , split=snake_case ).read()
_check_json_dataset(snake_case , snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Any:
if issubclass(snake_case , snake_case ):
_UpperCAmelCase = jsonl_path
elif issubclass(snake_case , snake_case ):
_UpperCAmelCase = [jsonl_path]
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case ).read()
_check_json_dataset(snake_case , snake_case )
def _check_json_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> str:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_json_datasetdict(snake_case , snake_case )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Tuple:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader({"""train""": jsonl_path} , features=snake_case , cache_dir=snake_case ).read()
_check_json_datasetdict(snake_case , snake_case )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Optional[Any]:
if split:
_UpperCAmelCase = {split: jsonl_path}
else:
_UpperCAmelCase = """train"""
_UpperCAmelCase = {"""train""": jsonl_path, """test""": jsonl_path}
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case ).read()
_check_json_datasetdict(snake_case , snake_case , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
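# For orientation (illustrative records, not the actual test fixtures): with lines=True
# the writer emits JSON Lines, one object per line, e.g.
#   {"col_1": "0", "col_2": 0, "col_3": 0.0}
#   {"col_1": "1", "col_2": 1, "col_3": 1.0}
# while lines=False emits a single JSON document whose layout follows `orient`
# (e.g. orient="records" gives one array of such objects).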
class TestJsonDatasetWriter:
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines( self , lines , load_json_function , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
            assert isinstance(exported_content , list )
            assert isinstance(exported_content[0] , dict )
            assert len(exported_content ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
    def test_dataset_to_json_orient( self , orient , container , keys , len_at , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
            assert isinstance(exported_content , container )
            if keys:
                if container is dict:
                    assert exported_content.keys() == keys
                else:
                    assert exported_content[0].keys() == keys
            else:
                assert not hasattr(exported_content , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
            if len_at:
                assert len(exported_content[len_at] ) == 10
            else:
                assert len(exported_content ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc( self , lines , load_json_function , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
            assert isinstance(exported_content , list )
            assert isinstance(exported_content[0] , dict )
            assert len(exported_content ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
    def test_dataset_to_json_orient_multiproc( self , orient , container , keys , len_at , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
            assert isinstance(exported_content , container )
            if keys:
                if container is dict:
                    assert exported_content.keys() == keys
                else:
                    assert exported_content[0].keys() == keys
            else:
                assert not hasattr(exported_content , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
            if len_at:
                assert len(exported_content[len_at] ) == 10
            else:
                assert len(exported_content ) == 10
    def test_dataset_to_json_with_invalid_num_proc( self , dataset ):
        with pytest.raises(ValueError ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset , buffer , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
    def test_dataset_to_json_compression( self , shared_datadir , tmp_path_factory , extension , compression , dataset ):
        path = tmp_path_factory.mktemp("""data""" ) / F"test.json.{extension}"
        original_path = str(shared_datadir / F"test_file.json.{extension}" )
        JsonDatasetWriter(dataset , path , compression=compression ).write()
        with fsspec.open(path , """rb""" , compression="""infer""" ) as f:
            exported_content = f.read()
        with fsspec.open(original_path , """rb""" , compression="""infer""" ) as f:
            original_content = f.read()
assert exported_content == original_content | 175 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _A( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
    def test_finetune_bert2bert( self ):
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
        val_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'] , padding='max_length' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['highlights'] , padding='max_length' , truncation=True , max_length=128 )
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
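        # Note on the -100 relabeling above: PyTorch's CrossEntropyLoss ignores targets
        # equal to -100, so padded label positions do not contribute to the loss.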
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        train_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        val_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='steps' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 239 |
import numpy as np
def _SCREAMING_SNAKE_CASE ( f , xa , x_end , ya , h ) -> np.ndarray:
    # classical fourth-order Runge-Kutta integration of y' = f(x, y)
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
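# Minimal usage sketch (an assumed example, not part of the original sample): integrate
# y' = y from x = 0 to x = 1 with step 0.01; the exact answer is e.
#
#   approx = _SCREAMING_SNAKE_CASE(lambda x, y: y, 0.0, 1.0, 1.0, 0.01)
#   print(approx[-1])  # ~2.71828, matching np.e to several decimal places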
if __name__ == "__main__":
import doctest
doctest.testmod()
| 239 | 1 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 702 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self : Tuple , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs : str , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 3_84, 'width': 3_84}
        size = get_size_dict(size , default_to_square=True )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self : int , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : int , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size , default_to_square=True )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        output_size = (size['height'], size['width'])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self : Optional[int] , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Any , ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self : str , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[str] , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self : int , images : ImageInput , do_resize : Optional[bool] = None , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , do_convert_rgb : bool = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : int , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=True )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        encoded_outputs = BatchFeature(data={'pixel_values': images} , tensor_type=return_tensors )
        return encoded_outputs
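# Minimal usage sketch (hypothetical values, not part of the original module):
#   image_processor = A__()
#   batch = image_processor.preprocess(PIL.Image.new('RGB', (512, 512)), return_tensors='np')
#   batch['pixel_values'].shape  # -> (1, 3, 384, 384) after the default 384x384 resize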
| 688 | 0 |
from __future__ import annotations
def _lowerCAmelCase ( electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
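# Usage sketch grounded in the mass action law n * p = n_i**2 that the branches above
# implement (the values here are illustrative):
#   _lowerCAmelCase(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   # -> ('intrinsic_conc', 50.0), since (25 * 100) ** 0.5 == 50.0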
if __name__ == "__main__":
import doctest
doctest.testmod()
| 641 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
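    # At runtime the module object is swapped for a _LazyModule, so the heavy torch/tf
    # imports above only happen on first attribute access; the TYPE_CHECKING branch
    # exists solely to give static type checkers the real symbols.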
| 484 | 0 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_660_254])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors , steps ):
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step(vectors ):
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate(vector , angle_in_degrees ):
    theta = numpy.radians(angle_in_degrees )
    c, s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot(vectors ):
    axes = plt.gca()
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
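# A note on growth, for orientation: each iteration replaces every segment with 4
# shorter ones, so the initial 3 segments of the triangle side become
# 3 * 4**5 = 3072 segments after the 5 iterations used below.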
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 712 |
'''simple docstring'''
def solution( limit = 1000000 ):
    """simple docstring"""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
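# For orientation: the sieve above computes Euler's totient phi(n) for every n <= limit,
# and the function returns the sum of phi(n) for 2 <= n <= limit. With limit=10 that sum
# is 1 + 2 + 2 + 4 + 2 + 6 + 4 + 6 + 4 = 31 (computed here in floating point, then
# truncated by int()).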
if __name__ == "__main__":
print(f'{solution() = }')
| 624 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components( self ):
        """simple docstring"""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
        torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip( self ):
        """simple docstring"""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe("anime turle" , generator=generator , output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading( self ):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 675 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        """simple docstring"""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 675 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Any = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class _UpperCamelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """openai-gpt"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self : Optional[int] , vocab_size=4_0478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
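    # Minimal usage sketch (assumed, not from the original module):
    #   config = _UpperCamelCase(n_embd=128, n_layer=2, n_head=2)
    #   config.hidden_size  # -> 128, resolved through attribute_map above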
| 702 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 0 |
"""simple docstring"""
import math
def decimal_to_octal(num: int ) -> str:
    """simple docstring"""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F'''0o{int(octal )}'''
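# For reference, Python's built-in oct() yields the same string form:
# oct(65) == '0o101', matching decimal_to_octal(65).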
def main() -> None:
"""simple docstring"""
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
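# Quick self-check sketch (assumes the functions above): Python's built-in `oct()`
# uses the same '0o' prefix, so the two should agree on small inputs.
def _self_check() -> None:
    for value in (2, 8, 65, 2_16, 5_12):
        assert decimal_to_octal(value ) == oct(value ), value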
| 19 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    '''An iterable dataset of unpredictable length: it yields consecutive integers and stops
    with probability `p_stop` after each item, capped at `max_length` items.'''
    def __init__( self , p_stop=0.01 , max_length=10_00 ) -> None:
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        '''simple docstring'''
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case :List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case :Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
__snake_case :List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
__snake_case :Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[str] = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
__snake_case :Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case :Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case :str = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[Any] = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case :Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case :int = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
__snake_case :Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case :Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :int = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case :Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :int = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case :List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[str] = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case :List[str] = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards( self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
        '''simple docstring'''
        random.seed(seed )
        reference = list(dataset )
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(first_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )
        observed = []
        for idx in range(0 , len(first_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(observed ) < len(reference ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
def __lowercase ( self ) -> Any:
'''simple docstring'''
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :str = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
__snake_case :Optional[int] = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :str = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :Union[str, Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case :Dict = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Any = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __lowercase ( self ) -> Any:
'''simple docstring'''
Accelerator()
__snake_case :Union[str, Any] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
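# Minimal sketch (an assumption, not the accelerate implementation) of the round-robin
# rule the BatchSamplerShard tests above exercise: shard `process_index` keeps every
# `num_processes`-th batch, starting at its own index.
def shard_batches(batches , num_processes , process_index ):
    return [batch for idx, batch in enumerate(batches ) if idx % num_processes == process_index]
# e.g. shard_batches([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]] , 2 , 0 ) -> [[0, 1, 2], [6, 7, 8]]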
| 455 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
UpperCAmelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
UpperCAmelCase__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
UpperCAmelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRContextEncoderTokenizer
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRQuestionEncoderTokenizer
UpperCAmelCase__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
UpperCAmelCase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase__ = r"\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `'tf'`: Return TensorFlow `tf.constant` objects.\n            - `'pt'`: Return PyTorch `torch.Tensor` objects.\n            - `'np'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer's default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Return:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    "
@add_start_docstrings(A )
class __lowerCAmelCase :
def __call__( self : List[str] , A : Any , A : Optional[str] = None , A : Optional[str] = None , A : Union[bool, str] = False , A : Union[bool, str] = False , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , A : Optional[bool] = None , **A : List[Any] , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
elif titles is None or texts is None:
_UpperCAmelCase = titles if texts is None else texts
return super().__call__(
A , A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
_UpperCAmelCase = titles if not isinstance(A , A) else [titles]
_UpperCAmelCase = texts if not isinstance(A , A) else [texts]
_UpperCAmelCase = len(A)
_UpperCAmelCase = questions if not isinstance(A , A) else [questions] * n_passages
assert len(A) == len(
A), F"There should be as many titles than texts but got {len(A)} titles and {len(A)} texts."
_UpperCAmelCase = super().__call__(A , A , padding=A , truncation=A)['input_ids']
_UpperCAmelCase = super().__call__(A , add_special_tokens=A , padding=A , truncation=A)['input_ids']
_UpperCAmelCase = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(A , A)
]
}
if return_attention_mask is not False:
_UpperCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
_UpperCAmelCase = attention_mask
return self.pad(A , padding=A , max_length=A , return_tensors=A)
def _lowerCamelCase ( self : Dict , A : BatchEncoding , A : DPRReaderOutput , A : int = 16 , A : int = 64 , A : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_UpperCAmelCase = reader_input['input_ids']
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reader_output[:3]
_UpperCAmelCase = len(A)
_UpperCAmelCase = sorted(range(A) , reverse=A , key=relevance_logits.__getitem__)
_UpperCAmelCase = []
for doc_id in sorted_docs:
_UpperCAmelCase = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
_UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCAmelCase = sequence_ids.index(self.pad_token_id)
else:
_UpperCAmelCase = len(A)
_UpperCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=A , top_spans=A , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=A , start_index=A , end_index=A , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(A) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowerCamelCase ( self : Optional[Any] , A : List[int] , A : List[int] , A : int , A : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_UpperCAmelCase = []
for start_index, start_score in enumerate(A):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
_UpperCAmelCase = sorted(A , key=lambda A: x[1] , reverse=A)
_UpperCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"Wrong span indices: [{start_index}:{end_index}]"
_UpperCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, F"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(A) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A )
class __lowerCAmelCase ( A , A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = DPRReaderTokenizer
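# Usage sketch for the reader tokenizer defined above (its class name is obfuscated here;
# in transformers it is DPRReaderTokenizerFast). Checkpoint name taken from the maps above:
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="Who wrote Hamlet?" , titles="Hamlet" , texts="Hamlet is a tragedy by William Shakespeare." , return_tensors="pt")
# `encoded["input_ids"]` follows the [CLS] question [SEP] title [SEP] text layout documented above.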
| 639 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = tempfile.mkdtemp()
lowercase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowercase__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
lowercase__ = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowercase__, fp )
def lowercase__ ( self : int, **lowerCamelCase : Dict ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname, **UpperCAmelCase__ )
def lowercase__ ( self : Tuple, **lowerCamelCase : Optional[int] ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname, **UpperCAmelCase__ )
def lowercase__ ( self : Union[str, Any], **lowerCamelCase : Optional[int] ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **UpperCAmelCase__ )
def lowercase__ ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=UpperCAmelCase__ )
lowercase__ = AlignProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, UpperCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer, UpperCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, UpperCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor, UpperCAmelCase__ )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=UpperCAmelCase__, padding_value=1.0 )
lowercase__ = AlignProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=UpperCAmelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, UpperCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, UpperCAmelCase__ )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(UpperCAmelCase__, return_tensors='''np''' )
lowercase__ = processor(images=UpperCAmelCase__, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
lowercase__ = '''lower newer'''
lowercase__ = processor(text=UpperCAmelCase__ )
lowercase__ = tokenizer(UpperCAmelCase__, padding='''max_length''', max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
lowercase__ = '''lower newer'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=UpperCAmelCase__, images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(UpperCAmelCase__ )
lowercase__ = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
lowercase__ = '''lower newer'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=UpperCAmelCase__, images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
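# End-to-end sketch of the processor contract exercised above (checkpoint name assumed):
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   inputs = processor(text="a photo of a cat" , images=image , return_tensors="pt")
#   list(inputs.keys()) -> ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]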
| 183 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a__ : Optional[Any] = 1_6
a__ : str = 3_2
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 16 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-cased" )
__SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
__SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
__SCREAMING_SNAKE_CASE = 8
else:
__SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
lowerCAmelCase_ , padding="longest" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="pt" , )
# Instantiate dataloaders.
__SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a__ : List[Any] = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCAmelCase_ ) == "1":
__SCREAMING_SNAKE_CASE = 2
# Initialize accelerator
__SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__SCREAMING_SNAKE_CASE = config["lr"]
__SCREAMING_SNAKE_CASE = int(config["num_epochs"] )
__SCREAMING_SNAKE_CASE = int(config["seed"] )
__SCREAMING_SNAKE_CASE = int(config["batch_size"] )
__SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowerCAmelCase_ )
def inner_training_loop(lowerCAmelCase_ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__SCREAMING_SNAKE_CASE = model.to(accelerator.device )
# Instantiate optimizer
__SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate scheduler
__SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = outputs.loss
accelerator.backward(lowerCAmelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
__SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose "
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
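# Minimal sketch (an assumption, not accelerate's implementation) of the retry pattern
# `find_executable_batch_size` provides: halve the batch size whenever the wrapped
# training function runs out of memory, then try again.
def find_executable_batch_size_sketch(starting_batch_size=128 ):
    def decorator(fn ):
        def wrapper(*args , **kwargs ):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size , *args , **kwargs )
                except RuntimeError:  # the real decorator specifically checks for CUDA out-of-memory errors
                    batch_size //= 2
            raise RuntimeError("No executable batch size found." )
        return wrapper
    return decorator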
| 682 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=30 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=32 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=10 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=0.6 , __lowerCAmelCase=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = mask_ratio
lowerCAmelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
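        # Worked example with the defaults above: (30 // 2) ** 2 = 225 patches, so
        # seq_length = ceil((1 - 0.6) * (225 + 1)) = 91 visible tokens (including [CLS]).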
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def a_ ( self):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = ViTMAEModel(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = ViTMAEForPreTraining(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase)
lowerCAmelCase = (self.image_size // self.patch_size) ** 2
lowerCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = ViTMAEForPreTraining(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowerCAmelCase = model(__lowerCAmelCase)
lowerCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a__( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
UpperCAmelCase_ : Optional[int] = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : int = False
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : List[Any] = False
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ViTMAEModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""")
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__lowerCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__lowerCAmelCase)
lowerCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
np.random.seed(2)
lowerCAmelCase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
lowerCAmelCase = torch.from_numpy(__lowerCAmelCase)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCAmelCase = pt_noise
super().check_pt_tf_models(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase))
lowerCAmelCase = outputs[0].cpu().numpy()
lowerCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = model_class.from_pretrained(__lowerCAmelCase)
model.to(__lowerCAmelCase)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase))
# Make sure we don't have nans
lowerCAmelCase = after_outputs[0].cpu().numpy()
lowerCAmelCase = 0
lowerCAmelCase = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__lowerCAmelCase , 1E-5)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""")
def a_ ( self):
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""")
def a_ ( self):
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""")
def a_ ( self):
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""")
def a_ ( self):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def a_ ( self):
"""simple docstring"""
pass
@slow
def a_ ( self):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ViTMAEModel.from_pretrained(model_name)
self.assertIsNotNone(lowerCAmelCase)
def snake_case__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a_ ( self):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""") if is_vision_available() else None
@slow
def a_ ( self):
"""simple docstring"""
np.random.seed(2)
lowerCAmelCase = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""").to(torch_device)
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""").to(torch_device)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCAmelCase = ViTMAEConfig()
lowerCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
lowerCAmelCase = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**__lowerCAmelCase , noise=torch.from_numpy(__lowerCAmelCase).to(device=__lowerCAmelCase))
# verify the logits
lowerCAmelCase = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , __lowerCAmelCase)
lowerCAmelCase = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__lowerCAmelCase) , atol=1E-4))
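# --- Added example (not part of the original test suite): a minimal sketch of
# why the tests above pass an explicit `noise` tensor: it pins ViTMAE's
# otherwise-random patch masking, so repeated forward passes agree. The
# randomly initialized model and the helper name are illustrative assumptions.
def _sketch_deterministic_vitmae_forward():
    config = ViTMAEConfig()
    model = ViTMAEForPreTraining(config).eval()
    num_patches = (config.image_size // config.patch_size) ** 2
    # fixing the noise fixes which patches get masked
    noise = torch.from_numpy(np.random.uniform(size=(1, num_patches))).float()
    pixel_values = torch.rand(1, 3, config.image_size, config.image_size)
    with torch.no_grad():
        out_a = model(pixel_values, noise=noise).logits
        out_b = model(pixel_values, noise=noise).logits
    assert torch.allclose(out_a, out_b)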
| 703 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
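# --- Added usage note (a sketch, not part of the package itself): with the
# public API re-exported above, a typical entry point looks like this. The
# dataset name and network access are assumptions for illustration:
#
#   from datasets import load_dataset
#   ds = load_dataset("rotten_tomatoes", split="train")
#   print(ds[0])        # one example as a dict
#   print(ds.features)  # the typed schema declared by the builder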
| 605 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    # branch 1: skip the element at `index`
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: take the element at `index`, recurse, then backtrack
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
A_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
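    # --- Added cross-check (a sketch, not in the original module): the recursion
    # above enumerates exactly the powerset of the input, so itertools must
    # produce the same number of subsequences, 2 ** len(sequence).
    from itertools import chain, combinations

    subsequence_count = sum(
        1 for _ in chain.from_iterable(combinations(seq, r) for r in range(len(seq) + 1))
    )
    assert subsequence_count == 2 ** len(seq)  # 8 subsequences of ["A", "B", "C"]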
| 42 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCAmelCase_ :
'''simple docstring'''
__A : Optional[int] = None
__A : Optional[jnp.ndarray] = None
__A : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _snake_case ( cls ):
"""simple docstring"""
return cls()
@dataclass
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : jnp.ndarray
__A : jnp.ndarray
__A : KarrasVeSchedulerState
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ):
'''simple docstring'''
@property
def _snake_case ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , __A = 0.02 , __A = 100 , __A = 1.007 , __A = 80 , __A = 0.05 , __A = 50 , ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
return KarrasVeSchedulerState.create()
def _snake_case ( self , __A , __A , __A = () ):
"""simple docstring"""
lowerCamelCase : List[str] = jnp.arange(0 , __A )[::-1].copy()
lowerCamelCase : Tuple = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A , schedule=jnp.array(__A , dtype=jnp.floataa ) , timesteps=__A , )
def _snake_case ( self , __A , __A , __A , __A , ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : List[str] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : int = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : Any = random.split(__A , num=1 )
lowerCamelCase : List[Any] = self.config.s_noise * random.normal(key=__A , shape=sample.shape )
lowerCamelCase : Dict = sigma + gamma * sigma
lowerCamelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _snake_case ( self , __A , __A , __A , __A , __A , __A = True , ):
"""simple docstring"""
lowerCamelCase : Dict = sample_hat + sigma_hat * model_output
lowerCamelCase : str = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A , derivative=__A , state=__A )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A = True , ):
"""simple docstring"""
lowerCamelCase : List[Any] = sample_prev + sigma_prev * model_output
lowerCamelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : List[str] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A , derivative=__A , state=__A )
def _snake_case ( self , __A , __A , __A , __A ):
"""simple docstring"""
raise NotImplementedError()
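# --- Added illustration (a sketch, not the diffusers API): the stochastic
# "churn" step from `add_noise_to_input` above, in plain NumPy. The idea is to
# raise the noise level sigma by a factor gamma, then add exactly enough fresh
# noise to move the sample onto that higher noise level. Names are illustrative.
import numpy as np

def add_churn(sample, sigma, s_churn, num_steps, s_noise=1.007, rng=np.random):
    # (the scheduler above additionally gates gamma on s_min <= sigma <= s_max)
    gamma = min(s_churn / num_steps, 2**0.5 - 1)
    eps = s_noise * rng.standard_normal(sample.shape)  # eps ~ N(0, s_noise^2 * I)
    sigma_hat = sigma + gamma * sigma
    sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5) * eps
    return sample_hat, sigma_hat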
| 340 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xglm"""] = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xglm_fast"""] = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xglm"""] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_xglm"""] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xglm"""] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
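# --- Added illustration (a sketch, not transformers' actual `_LazyModule`):
# the lazy-module pattern used above defers the heavy submodule imports until
# one of their attributes is first touched, keeping the top-level import cheap.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        # import the owning submodule on first access, then delegate
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(module, attr)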
| 341 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    '''Build a Gabor kernel: a Gaussian envelope modulated by a cosine wave.'''
    # the kernel size has to be odd so the filter has a well-defined center
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
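# --- Added sanity note (not in the original module): with psi = 0 the kernel
# peaks at its center, where the Gaussian envelope and the cosine both equal 1:
#
#   >>> kernel = gabor_filter_kernel(9, 2, 0, 10, 1, 0)
#   >>> kernel.shape
#   (9, 9)
#   >>> float(kernel[4, 4]) == float(kernel.max()) == 1.0
#   True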
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
| 341 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : str )-> Any:
'''simple docstring'''
__snake_case = RemBertConfig.from_json_file(_lowerCamelCase )
print('''Building PyTorch model from configuration: {}'''.format(str(_lowerCamelCase ) ) )
__snake_case = RemBertModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(_lowerCamelCase ) )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : List[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 24 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    '''Isolate the decimal part of `number`, rounded to `digit_amount` places when positive.'''
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 668 | 0 |
def solution(n: int = 1000) -> int:
    '''Return the sum of all multiples of 3 or 5 below `n`.'''
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'''{solution() = }''')
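    # --- Added cross-check (a sketch): by inclusion-exclusion the same sum has
    # a closed form, 3*T(333) + 5*T(199) - 15*T(66) with T(m) = m*(m+1)//2:
    assert solution() == 3 * (333 * 334 // 2) + 5 * (199 * 200 // 2) - 15 * (66 * 67 // 2) == 233168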
| 76 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : str = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    '''simple docstring'''
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["""projector.weight"""]
    model.projector.bias.data = downstream_dict["""projector.bias"""]
    model.classifier.weight.data = downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    '''simple docstring'''
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["""model.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.linear.bias"""]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    '''simple docstring'''
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["""connector.weight"""]
    model.projector.bias.data = downstream_dict["""connector.bias"""]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    model.feature_extractor.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    model.classifier.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    model.classifier.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    model.objective.weight.data = downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    downstream_dict = checkpoint["""Downstream"""]
    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification"""):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("""ForAudioFrameClassification"""):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("""ForXVector"""):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''')
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["""Featurizer"""]["""weights"""]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__lowerCAmelCase : str = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 76 | 1 |
import os
import sys
UpperCAmelCase = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCAmelCase = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCAmelCase_ ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return AutoConfig.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCAmelCase_ ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return AutoTokenizer.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCAmelCase_ ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return AutoModel.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCAmelCase_ ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return AutoModelForCausalLM.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCAmelCase_ ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return AutoModelForMaskedLM.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCAmelCase_ ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return AutoModelForSequenceClassification.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCAmelCase_ ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return AutoModelForQuestionAnswering.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
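# --- Added usage note (a sketch, not part of this file): these wrappers are the
# entry points `torch.hub` dispatches to. The hub repo and model names below
# are assumptions for illustration and require network access:
#
#   import torch
#   tokenizer = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
#   model = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")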
| 84 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowerCamelCase : Tuple = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
lowerCamelCase : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
lowerCamelCase : List[str] = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
lowerCamelCase : Optional[Any] = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
lowerCamelCase : Union[str, Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase (datasets.Metric ):
def __UpperCAmelCase ( self )-> str:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=[1, 1_0, 1_0_0] , __UpperCamelCase=4 , __UpperCamelCase=3.0 )-> str:
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
__lowerCAmelCase = []
__lowerCAmelCase = Counter()
__lowerCAmelCase = 0
__lowerCAmelCase = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
__lowerCAmelCase = candidate + "\n" + test_case
__lowerCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
__lowerCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
__lowerCAmelCase = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
__lowerCAmelCase , __lowerCAmelCase = [], []
for result in results.values():
result.sort()
__lowerCAmelCase = [r[1]["passed"] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
__lowerCAmelCase = np.array(__UpperCamelCase )
__lowerCAmelCase = np.array(__UpperCamelCase )
__lowerCAmelCase = k
__lowerCAmelCase = {F"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
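# --- Added worked example (a sketch): the estimator above computes the unbiased
#   pass@k = 1 - C(n - c, k) / C(n, k)
# as a numerically stable running product. With n = 5 samples of which c = 2
# pass, pass@1 = 1 - C(3, 1) / C(5, 1) = 1 - 3/5 = 0.4:
assert abs(float(estimate_pass_at_k(5, [2], 1)[0]) - 0.4) < 1e-9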
| 367 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = 0
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Tuple = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :Optional[Any] = Path(UpperCamelCase_ ) / "preprocessor_config.json"
lowerCAmelCase_ :Optional[Any] = Path(UpperCamelCase_ ) / "config.json"
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase_ , """w""" ) )
lowerCAmelCase_ :Any = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __lowerCAmelCase ( self ) -> Dict:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :Optional[int] = Path(UpperCamelCase_ ) / "preprocessor_config.json"
lowerCAmelCase_ :Optional[Any] = Path(UpperCamelCase_ ) / "config.json"
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase_ , """w""" ) )
lowerCAmelCase_ :Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __lowerCAmelCase ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :str = CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowerCAmelCase_ :Union[str, Any] = Path(UpperCamelCase_ ) / "preprocessor_config.json"
lowerCAmelCase_ :List[Any] = Path(UpperCamelCase_ ) / "config.json"
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase_ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase_ :Dict = AutoImageProcessor.from_pretrained(UpperCamelCase_ ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCAmelCase_ :Optional[Any] = CLIPImageProcessor(**UpperCamelCase_ )
# save in new folder
model_config.save_pretrained(UpperCamelCase_ )
config.save_pretrained(UpperCamelCase_ )
lowerCAmelCase_ :str = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
# make sure private variable is not incorrectly saved
lowerCAmelCase_ :Dict = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :str = Path(UpperCamelCase_ ) / "preprocessor_config.json"
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase_ , """w""" ) , )
lowerCAmelCase_ :Any = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
UpperCamelCase_ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :Any = AutoImageProcessor.from_pretrained("""clip-base""" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoImageProcessor.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
UpperCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase_ :Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __lowerCAmelCase ( self ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase_ :List[str] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase_ :Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase_ )
lowerCAmelCase_ :Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase_ )
lowerCAmelCase_ :int = AutoImageProcessor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoImageProcessor.register(UpperCamelCase_ , UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoImageProcessor.register(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :Union[str, Any] = Path(UpperCamelCase_ ) / "preprocessor_config.json"
lowerCAmelCase_ :Dict = Path(UpperCamelCase_ ) / "config.json"
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase_ , """w""" ) )
lowerCAmelCase_ :Dict = CustomImageProcessor.from_pretrained(UpperCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase_ )
lowerCAmelCase_ :Optional[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> Optional[int]:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Tuple = True
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoImageProcessor.register(UpperCamelCase_ , UpperCamelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :str = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Dict = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(UpperCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 710 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''Element-wise logistic sigmoid, 1 / (1 + exp(-x)).'''
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''SiLU (swish) activation: x * sigmoid(x).'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
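    # --- Added example (a sketch): sigmoid(0) = 0.5, so SiLU(0) = 0 * 0.5 = 0;
    # small negative inputs give small negative outputs (non-monotonic below 0).
    x = np.array([-1.0, 0.0, 1.0])
    print(sigmoid_linear_unit(x))  # approximately [-0.2689, 0.0, 0.7311]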
| 256 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ (_a , unittest.TestCase ):
lowercase_ : int = PhobertTokenizer
lowercase_ : int = False
def A__ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
lowerCAmelCase__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
lowerCAmelCase__ = ['''#version: 0.2''', '''l à</w>''']
lowerCAmelCase__ = {'''unk_token''': '''<unk>'''}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCamelCase ) )
def A__ ( self : Any , **__lowerCamelCase : Tuple ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def A__ ( self : Optional[Any] , __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = '''Tôi là VinAI Research'''
lowerCAmelCase__ = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def A__ ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase__ = '''Tôi là VinAI Research'''
lowerCAmelCase__ = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
lowerCAmelCase__ = tokenizer.tokenize(__lowerCamelCase )
print(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = tokens + [tokenizer.unk_token]
lowerCAmelCase__ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
| 615 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    '''Return the maximum sum obtainable from `nums` without using adjacent elements.'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
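    # --- Added worked example (a sketch): for [2, 7, 9, 3, 1] the optimum picks
    # 2 + 9 + 1 = 12; the DP carries (best including current, best excluding).
    assert maximum_non_adjacent_sum([2, 7, 9, 3, 1]) == 12
    assert maximum_non_adjacent_sum([]) == 0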
| 615 | 1 |
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    '''Reverse the order of whitespace-separated words in `input_str`.'''
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    '''Length of the smallest repunit divisible by `divisor` (0 if none exists).'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    '''Project Euler 129: smallest odd divisor whose least divisible repunit exceeds `limit`.'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
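    # --- Added worked example (a sketch): R(6) = 111111 = 7 * 15873, and no
    # shorter repunit is divisible by 7, so:
    assert least_divisible_repunit(7) == 6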
| 319 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_jukebox"""] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 108 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__a: Union[str, Any] = logging.get_logger(__name__)
__a: str = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a: Dict = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__a: Union[str, Any] = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = BlenderbotSmallTokenizer
def __init__( self : List[str] , lowerCamelCase : List[str]=None , lowerCamelCase : str=None , lowerCamelCase : Optional[Any]="<|endoftext|>" , lowerCamelCase : Dict="<|endoftext|>" , lowerCamelCase : str="<|endoftext|>" , lowerCamelCase : str=False , lowerCamelCase : Tuple=True , **lowerCamelCase : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=lowerCamelCase , merges=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase , ) , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , **lowerCamelCase , )
_UpperCAmelCase = add_prefix_space
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Union[str, Any]=None ) -> str:
"""simple docstring"""
_UpperCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase ( self : Any , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 108 | 1 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_lowerCamelCase : List[Any] = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
_lowerCamelCase : int = cvtColor(img, COLOR_BGR2GRAY)
def _a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE__ )
# assert negative_img array for at least one True
assert negative_img.any()
def _a ( ) -> Union[str, Any]:
'''simple docstring'''
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE__ , 1_10 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def _a ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
SCREAMING_SNAKE_CASE__ : Optional[int] = canny.canny(SCREAMING_SNAKE_CASE__ )
# assert canny array for at least one True
assert canny_array.any()
def _a ( ) -> str:
'''simple docstring'''
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE__ , 5 , sigma=0.9 ).all()
def _a ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
SCREAMING_SNAKE_CASE__ : Tuple = conv.img_convolve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).astype(SCREAMING_SNAKE_CASE__ )
assert res.any()
def _a ( ) -> Union[str, Any]:
'''simple docstring'''
assert med.median_filter(SCREAMING_SNAKE_CASE__ , 3 ).any()
def _a ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = sob.sobel_filter(SCREAMING_SNAKE_CASE__ )
assert grad.any() and theta.any()
def _a ( ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sp.make_sepia(SCREAMING_SNAKE_CASE__ , 20 )
assert sepia.all()
def _a ( SCREAMING_SNAKE_CASE__ : str = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = bs.Burkes(imread(SCREAMING_SNAKE_CASE__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _a ( SCREAMING_SNAKE_CASE__ : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _a ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
SCREAMING_SNAKE_CASE__ : int = imread(SCREAMING_SNAKE_CASE__ , 0 )
# Test for get_neighbors_pixel function() return not None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : int = image[x_coordinate][y_coordinate]
SCREAMING_SNAKE_CASE__ : Optional[Any] = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
SCREAMING_SNAKE_CASE__ : Dict = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
SCREAMING_SNAKE_CASE__ : List[str] = lbp.local_binary_value(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert lbp_image.any()
| 157 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCamelCase : Optional[int] = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
'''simple docstring'''
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "lm_head"
SCREAMING_SNAKE_CASE__ : List[str] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : str = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : str = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
else:
SCREAMING_SNAKE_CASE__ : str = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ : str = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE__ : List[str] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ : Dict = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
SCREAMING_SNAKE_CASE__ : str = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : str = name.split(SCREAMING_SNAKE_CASE__ )[0].split("." )[-2]
SCREAMING_SNAKE_CASE__ : List[str] = mapped_key.replace("*" , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE__ : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ : str = "weight"
else:
SCREAMING_SNAKE_CASE__ : List[Any] = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = full_name.split("conv_layers." )[-1]
SCREAMING_SNAKE_CASE__ : Any = name.split("." )
SCREAMING_SNAKE_CASE__ : List[str] = int(items[0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : int = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE__ : str = UniSpeechConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : str = UniSpeechConfig()
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ : List[Any] = Dictionary.load_from_json(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE__ : str = target_dict.pad_index
SCREAMING_SNAKE_CASE__ : str = target_dict.bos_index
SCREAMING_SNAKE_CASE__ : int = target_dict.eos_index
SCREAMING_SNAKE_CASE__ : List[str] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ : int = os.path.join(SCREAMING_SNAKE_CASE__ , "vocab.json" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE__ : Dict = 42
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 43
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = WavaVecaPhonemeCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = True if config.feat_extract_norm == "layer" else False
SCREAMING_SNAKE_CASE__ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : str = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = UniSpeechForCTC(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = UniSpeechForPreTraining(SCREAMING_SNAKE_CASE__ )
if is_finetuned:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
SCREAMING_SNAKE_CASE__ : int = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE__ )
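# Example invocation (a sketch; the paths below are placeholders, not real files,
# and the script filename is assumed):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-hf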
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''env''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''' )

    parser.add_argument(
        '''--config_file''' , default=None , help='''The config file to use for the default values in the launching script.''' )

    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ):
    """simple docstring"""
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()

    info = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
        '''PyTorch XPU available''': str(pt_xpu_available ),
        '''PyTorch NPU available''': str(pt_npu_available ),
        '''System RAM''': f'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info['''GPU type'''] = torch.cuda.get_device_name()

    print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
    print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )

    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
    accelerate_config_str = (
        '''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else f'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info['''Accelerate config'''] = accelerate_config

    return info
def main():
    """simple docstring"""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
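# Typical usage: the accelerate CLI wires `env_command_parser`/`env_command` in
# as the `env` subcommand, so running `accelerate env` (or this module
# directly) prints the table above for pasting into bug reports.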
if __name__ == "__main__":
raise SystemExit(main())
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests( TestCasePlus ):
    def _create_dummy_data( self , data_dir ):
        os.makedirs(data_dir , exist_ok=True )
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f"""{split}.{field}""" ) , "w" ) as f:
                    f.write(content )
def _UpperCAmelCase ( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str = "pytorch" ):
__a = self.get_auto_remove_tmp_dir()
__a = os.path.join(__SCREAMING_SNAKE_CASE , "output" )
__a = os.path.join(__SCREAMING_SNAKE_CASE , "data" )
self._create_dummy_data(data_dir=__SCREAMING_SNAKE_CASE )
__a = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )

        metrics_save_path = os.path.join(output_dir , "metrics.json" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
    @require_torch_gpu
    def test_finetune_gpu( self ):
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )

    @require_torch_multi_gpu
    def test_finetune_multi_gpu( self ):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval( self ):
        result = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multi_gpu_ray_retrieval( self ):
        result = self._run_finetune(gpus=2 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
UpperCamelCase = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments( TrainingArguments ):
    mp_parameters : str = field(
        default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
    def __post_init__( self ):
"""simple docstring"""
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , lowerCamelCase__ , )
@cached_property
    def _setup_devices( self ) -> "torch.device":
"""simple docstring"""
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            device = torch.device("""cpu""" )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("""cuda""" , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp # noqa: F401

            torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
            device = torch.device("""cuda""" , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
            device = torch.device("""cuda""" , self.local_rank )
            self._n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(lowerCamelCase__ )
return device
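    # Device-selection precedence in `_setup_devices` (summary of the branches
    # above): explicit `no_cuda` -> CPU; SageMaker model parallel -> the GPU for
    # `smp.local_rank()`; SageMaker data parallel -> the `smddp` process group;
    # `local_rank == -1` -> single-process CUDA/CPU (nn.DataParallel when
    # several GPUs are visible); otherwise classic NCCL DDP, one GPU per process.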
    @property
    def world_size( self ):
        """simple docstring"""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device( self ):
        """simple docstring"""
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation( self ):
        """simple docstring"""
        return False
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray_img = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative() -> None:
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast() -> None:
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 110 ) ).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""" )


def test_gen_gaussian_kernel() -> None:
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()


def test_canny() -> None:
    canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter() -> None:
    assert gg.gaussian_filter(gray_img , 5 , sigma=0.9 ).all()


def test_convolve_filter() -> None:
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray_img , laplace ).astype(uinta )
    assert res.any()


def test_median_filter() -> None:
    assert med.median_filter(gray_img , 3 ).any()


def test_sobel_filter() -> None:
    grad , theta = sob.sobel_filter(gray_img )
    assert grad.any() and theta.any()


def test_sepia() -> None:
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg" ) -> None:
    burkes = bs.Burkes(imread(file_path , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg" , ) -> None:
    nn = rs.NearestNeighbour(imread(file_path , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern() -> None:
    file_path = """digital_image_processing/image_data/lena.jpg"""

    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )

    assert lbp_image.any()
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs ):
    return 1.0 / (1.0 + np.exp(-_outputs ))


def softmax(_outputs ):
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
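# Note on the softmax above: subtracting the row-wise max before exponentiating
# is the standard numerically stable formulation; it changes nothing
# mathematically because softmax(x) == softmax(x - c) for any constant c.
# Quick check: softmax(np.array([[1000.0, 1000.0]])) -> [[0.5, 0.5]] instead of
# the nan a naive exp would produce.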
class ClassificationFunction( ExplicitEnum ):
    SIGMOID = """sigmoid"""
    SOFTMAX = """softmax"""
    NONE = """none"""
@add_end_docstrings(
_UpperCamelCase , r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline( Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs ):
        """simple docstring"""
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k , int ) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , UserWarning , )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply , str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        result = super().__call__(*args , **kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0] , str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self , inputs , **tokenizer_kwargs ):
        """simple docstring"""
        return_tensors = self.framework
        if isinstance(inputs , dict ):
            return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ) and len(inputs ) == 1 and isinstance(inputs[0] , list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
        return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs )
    def _forward( self , model_inputs ):
        """simple docstring"""
        return self.model(**model_inputs )
    def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ):
        """simple docstring"""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )

        if top_k == 1 and _legacy:
            return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"] , reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
import operator as op
__UpperCamelCase : Optional[Any] = "scaler.pt"
__UpperCamelCase : Optional[Any] = "pytorch_model"
__UpperCamelCase : str = "random_states"
__UpperCamelCase : Optional[int] = "optimizer"
__UpperCamelCase : Optional[int] = "scheduler"
__UpperCamelCase : str = "pytorch_model.bin"
__UpperCamelCase : List[str] = "pytorch_model.bin.index.json"
__UpperCamelCase : List[str] = "model.safetensors"
__UpperCamelCase : Optional[int] = "model.safetensors.index.json"
__UpperCamelCase : List[str] = "1.10.2"
__UpperCamelCase : Dict = "py38"
__UpperCamelCase : List[str] = "4.17.0"
__UpperCamelCase : Any = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
__UpperCamelCase : Any = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
__UpperCamelCase : int = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
__UpperCamelCase : Dict = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
__UpperCamelCase : str = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
__UpperCamelCase : List[Any] = "2.0.1"
__UpperCamelCase : int = ["pdsh", "standard", "openmpi", "mvapich"]
__UpperCamelCase : List[str] = ["default", "reduce-overhead", "max-autotune"]
__UpperCamelCase : List[Any] = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__UpperCamelCase : List[Any] = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
__UpperCamelCase : List[str] = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
__UpperCamelCase : int = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int ) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F"{num}/{den}" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2 ) -> int:
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
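# Worked check: for two-digit fractions the four non-trivial digit-cancelling
# fractions are 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100,
# so solution() returns 100.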
if __name__ == "__main__":
    print(solution())
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6_934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1_112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693_428_650_170_972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0_002 ) < 1e-3
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
    def test_full_loop_device( self ):
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )

            model_output = model(sample , t )

            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        if str(torch_device ).startswith("cpu" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z ):
    return 1 / (1 + np.exp(-z ))


def cost_function(h , y ):
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
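# In cost_function, h is the predicted probability sigmoid_function(x @ theta)
# and the quantity computed is the binary cross-entropy
#   J(theta) = -mean( y*log(h) + (1 - y)*log(1 - h) ).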
def log_likelihood(x , y , weights ):
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )


def logistic_reg(alpha , x , y , max_iterations=7_00_00 ):
    theta = np.zeros(x.shape[1] )

    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 1_00 == 0:
            print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations
    return theta
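# logistic_reg above is plain batch gradient descent: the gradient of the
# cross-entropy cost is x.T @ (h - y) / m, and each iteration applies the
# update theta <- theta - alpha * gradient.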
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print('''theta: ''', theta) # printing the theta i.e our weights vector
def predict_prob(x ):
    return sigmoid_function(
        np.dot(x , theta ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
(xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
(xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
(xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
grid = np.c_[xxa.ravel(), xxb.ravel()]
probs = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
'''simple docstring'''
def is_even(number: int ) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )


@pytest.fixture
def mock_hfh(monkeypatch ):
    """simple docstring"""

    class MetricMock:
        def __init__(self , metric_id ):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]

        def list_metrics(self ):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )


@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def test_metric_deprecation_warning(func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    """simple docstring"""
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a ):
    """simple docstring"""
    min_val = min(a ) # min() finds the minimum value
    max_val = max(a ) # max() finds the maximum value

    size = max_val - min_val + 1 # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
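# Worked example: for a = [8, 3, 2, 7, 4, 6, 8], min_val is 2 and size is 7,
# so the holes become [1, 1, 1, 0, 1, 1, 2] (counts for the values 2..8) and
# are read back as [2, 3, 4, 6, 7, 8, 8]. Runs in O(n + value range) time.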
def main():
    """simple docstring"""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print("Sorted order is:" , " ".join(str(n ) for n in a ) )
if __name__ == "__main__":
main()
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_a , emb_b , eps=1e-12 ):
    """simple docstring"""
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
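# Each row above is L2-normalised (eps keeps the division away from zero), so
# the final matmul returns the pairwise cosine similarities, each in [-1, 1].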
class FlaxStableDiffusionSafetyCheckerModule(nn.Module ):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.floataa

    def setup( self ):
        """simple docstring"""
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config )
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )

        self.concept_embeds = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        self.special_care_embeds = self.param(
            '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )

        self.concept_embeds_weights = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
        self.special_care_embeds_weights = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
    def __call__( self , clip_input ):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input )[1]
        image_embeds = self.visual_projection(pooled_output )

        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel ):
    config_class = CLIPConfig
    main_input_name = '''clip_input'''
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__( self , config: CLIPConfig , input_shape: Optional[Tuple] = None , seed: int = 0 , dtype: jnp.dtype = jnp.floataa , _do_init: bool = True , **kwargs , ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )

    def init_weights( self , rng: jax.random.KeyArray , input_shape: Tuple , params: FrozenDict = None ):
        """simple docstring"""
        # init input tensor
        clip_input = jax.random.normal(rng , input_shape )

        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}

        random_params = self.module.init(rngs , clip_input )['''params''']

        return random_params

    def __call__( self , clip_input , params: dict = None , ):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )

        return self.module.apply(
            {'''params''': params or self.params} , jnp.array(clip_input , dtype=jnp.floataa ) , rngs={} , )
| 598 | """simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph: dict , v: str , visited_forward: set , visited_backward: set , cst_fwd: dict , cst_bwd: dict , queue: PriorityQueue , parent: dict , shortest_distance: float | int , ) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str , destination: str , graph_forward: dict , graph_backward: dict ) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _ , v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )

        _ , v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )

        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )

        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
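# Example (for the sample graphs above): bidirectional_dij("E", "F", graph_fwd,
# graph_bwd) returns 3, via E -> G -> F (2 + 1), beating E -> B -> C -> D -> F
# (cost 4). The search stops once the best forward + backward frontier costs
# can no longer beat the shortest distance already found.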
if __name__ == "__main__":
import doctest
doctest.testmod()
def valid_coloring(neighbours: list[int] , colored_vertices: list[int] , color: int ) -> bool:
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )


def util_color(graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) -> bool:
    """simple docstring"""
    if index == len(graph ):
        return True

    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]] , max_colors: int ) -> list[int]:
    """simple docstring"""
    colored_vertices = [-1] * len(graph )

    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices

    return []
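# Minimal usage sketch: a triangle (every vertex adjacent to both others)
# needs three colors.
#   graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#   color(graph, 2)  -> []           (no valid 2-coloring exists)
#   color(graph, 3)  -> [0, 1, 2]    (the assignment found by backtracking)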
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list , train_usr: list , train_mtch: list , test_dt: list , test_mtch: list ) -> float:
    """simple docstring"""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
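# The closed form used above is the ordinary-least-squares normal equation,
#   beta = (X^T X)^(-1) X^T y,
# with design-matrix rows [1, date, matches], so beta holds the intercept and
# the two slopes.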
def sarimax_predictor(train_user: list , train_match: list , test_match: list ) -> float:
    """simple docstring"""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=600 , method="nm" )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def support_vector_regressor(x_train: list , x_test: list , train_user: list ) -> float:
    """simple docstring"""
    regressor = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def interquartile_range_checker(train_user: list ) -> float:
    """simple docstring"""
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1 )
    return low_lim
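# Quick check: for train_user = [1, 2, ..., 10], np.percentile gives q1 = 3.25
# and q3 = 7.75, so iqr = 4.5 and the returned low limit is 3.25 - 0.45 = 2.8.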
def data_safety_checker(list_vote: list , actual_result: float ) -> bool:
    """simple docstring"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
def solution(n = 1_00 ):
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
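# Worked example for n = 10: the sum 1 + ... + 10 is 55, so the square of the
# sum is 3025; the sum of the squares is 385; the difference is 3025 - 385 = 2640.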
if __name__ == "__main__":
print(f'''{solution() = }''')
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
RADIUS = 6378137
def haversine_distance(lat_a: float , lon_a: float , lat_b: float , lon_b: float ) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_a = atan((1 - flattening) * tan(radians(lat_a ) ) )
    phi_b = atan((1 - flattening) * tan(radians(lat_b ) ) )
    lambda_a = radians(lon_a )
    lambda_b = radians(lon_b )
    # Equation
    sin_sq_phi = sin((phi_b - phi_a) / 2 )
    sin_sq_lambda = sin((lambda_b - lambda_a) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_a ) * cos(phi_b ) * sin_sq_lambda ) )
    return 2 * RADIUS * asin(h_value )
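# The quantity folded into h_value is the haversine formula,
#   d = 2 * R * asin( sqrt( sin^2(dphi/2) + cos(phi_a)*cos(phi_b)*sin^2(dlambda/2) ) ),
# here applied to latitudes first corrected for the ellipsoid's flattening.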
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer ):
    mode = "sequence-classification"
    def __init__( self , hparams ):
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams , num_labels , self.mode )
    def forward( self , **inputs ):
        return self.model(**inputs )
    def training_step( self , batch , batch_idx ):
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['''token_type_ids'''] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None

        outputs = self(**inputs )
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]['''scheduler''']
        tensorboard_logs = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data( self ):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , cached_features_file )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                examples = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == '''dev'''
                    else processor.get_train_examples(args.data_dir )
                )
                features = convert_examples_to_features(
                    examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('''Saving features into cached file %s''' , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self , mode: str , batch_size: int , shuffle: bool = False ):
        mode = '''dev''' if mode == '''test''' else mode
        cached_features_file = self._feature_file(mode )
        logger.info('''Loading features from cached file %s''' , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.float )

        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels ) , batch_size=batch_size , shuffle=shuffle , )
    def validation_step( self , batch , batch_idx ):
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['''token_type_ids'''] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None

        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['''labels'''].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ):
        val_loss_mean = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
        preds = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds )

        out_label_ids = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]

        results = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids )}

        ret = dict(results.items() )
        ret['''log'''] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs: list ):
        ret , preds , targets = self._eval_end(outputs )
        logs = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        ret , predictions , targets = self._eval_end(outputs )
        logs = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser , root_dir ):
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '''--max_seq_length''' , default=128 , type=int , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--task''' , default='''''' , type=str , required=True , help='''The GLUE task to run''' , )
        parser.add_argument(
            '''--gpus''' , default=0 , type=int , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            '''./results''' , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
        os.makedirs(args.output_dir )

    model = GLUETransformer(args )
    trainer = generic_train(model , args )

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig( PretrainedConfig ):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=3_2000 , hidden_size=4096 , intermediate_size=1_1008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.0_2 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
_lowercase_uppercase_re = re.compile(r'''([a-z\d])([A-Z])''')

_single_underscore_re = re.compile(r'''(?<!_)_(?!_)''')
_multiple_underscores_re = re.compile(r'''(_{2,})''')

_split_re = r'''^\w+(\.\w+)*$'''

INVALID_WINDOWS_CHARACTERS_IN_PATH = r'''<>:/\|?*'''
def camelcase_to_snakecase(name ):
    name = _uppercase_uppercase_re.sub(R'\1_\2' , name )
    name = _lowercase_uppercase_re.sub(R'\1_\2' , name )
    return name.lower()
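# Example: camelcase_to_snakecase("SomeDatasetName") -> "some_dataset_name"
# (the first regex splits runs of capitals, the second splits lower-to-upper
# transitions, then everything is lowercased).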
def snakecase_to_camelcase(name ):
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '' )
def filename_prefix_for_name(name ):
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split(name , split ):
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re , split ):
        raise ValueError(F'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' )
    return F'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split(dataset_name , split , data_dir , filetype_suffix=None ):
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir , prefix )
    return F'''{filepath}*'''
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ):
_snake_case : Any = filename_prefix_for_split(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Optional[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if shard_lengths:
_snake_case : Dict = len(__lowerCAmelCase )
_snake_case : Optional[int] = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(__lowerCAmelCase )]
if filetype_suffix:
_snake_case : Tuple = [filename + F'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
_snake_case : Dict = prefix
if filetype_suffix:
filename += F'''.{filetype_suffix}'''
return [filename]
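# Illustrative usage (added; these values follow directly from the regexes and
# f-strings above, assuming the restored function names):
#   camelcase_to_snakecase("SquadV2")              -> "squad_v2"
#   snakecase_to_camelcase("squad_v2")             -> "SquadV2"
#   filename_prefix_for_split("SquadV2", "train")  -> "squad_v2-train"
#   filenames_for_dataset_split("/data", "SquadV2", "train", "arrow", shard_lengths=[100, 100])
#       -> ["/data/squad_v2-train-00000-of-00002.arrow",
#           "/data/squad_v2-train-00001-of-00002.arrow"]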
| 304 |
def _modexpt(base, exponent, modulo_value):
    """Recursive fast modular exponentiation: (base ** exponent) % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base=1777, height=1855, digits=8):
    """Returns the last `digits` digits of the hyperexponentiation base tetrated height times."""
    # computes base↑↑height by repeated modular exponentiation, right to left
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
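# Quick sanity check (added for illustration): _modexpt reduces to ordinary modular
# exponentiation, e.g. _modexpt(2, 10, 1000) == 24 because 2**10 = 1024; solution()
# with the defaults answers Project Euler 188 (1777↑↑1855 modulo 10**8).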
| 304 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, change the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 709 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None


class FuncNonContiguousArgs:
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase(unittest.TestCase ):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
    def test_export_tensorflow( self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , "tf" , 12 , **model_kwargs )
    @require_torch
    @slow
    def test_export_pytorch( self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , "pt" , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_custom_bert_model( self ):
        from transformers import BertModel
        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t" ) as vocab_file:
            vocab_file.write("\n".join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
                model.save_pretrained(bert_save_dir )
                self._test_export(bert_save_dir , "pt" , 12 , tokenizer )
@require_tf
@slow
    def test_quantize_tf( self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , "tf" , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
    def test_quantize_pytorch( self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , "pt" , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model" )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath("model.onnx" )
            # Remove folder if it exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework , model , path , opset , tokenizer , **model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch( self ):
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
        self._test_infer_dynamic_axis(model , tokenizer , "pt" )
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf( self ):
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
        self._test_infer_dynamic_axis(model , tokenizer , "tf" )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
        self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
    def test_ensure_valid_input( self ):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameters should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
        # Generated args are interleaved with other args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly one arg (everything before the missing "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["input_ids"] )
        self.assertEqual(ordered_input_names[0] , "input_ids" )
    def test_generate_identified_filename( self ):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
        self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 698 | 0 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune the walk in place so "scripts" and hidden/dunder dirs are skipped
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
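# Example output (added for illustration): for a hypothetical file
# "ciphers/caesar_cipher.py", print_directory_md would emit markdown like
#
#   ## Ciphers
#     * [Caesar Cipher](ciphers/caesar_cipher.py)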
| 27 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is precisely within 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
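# Sanity check (added for illustration): f(x) = x**3 - 2*x - 5 has a single real
# root near x = 2.0945515, so bisection(f, 1, 1000) converges to that value
# within the 1e-7 tolerance used above.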
| 27 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
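# Sanity check (added for illustration): solution(10) == 17, since the primes
# below 10 are 2, 3, 5 and 7; with the default n = 2_000_000 this is Project
# Euler problem 10.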
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 714 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base')
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large')
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))
| 392 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__magic_name__ : Any = 16
__magic_name__ : Dict = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """
    Creates a pair of train/eval `DataLoader`s for the GLUE MRPC dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue' , 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt')
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('glue' , 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , eval_metric)
        performance_metric[f'epoch-{epoch}'] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'all_results.json') , 'w') as f:
            json.dump(performance_metric , f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--performance_lower_bound' , type=float , default=None , help='Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=3 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config , args)
if __name__ == "__main__":
main()
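# Launch sketch (added for illustration): this script is meant to be run through
# the `accelerate` launcher, typically with a DeepSpeed config, e.g.:
#
#   accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out
#
# `deepspeed_config.yaml` and `test_performance.py` are placeholder names.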
| 280 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution ):
    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )

    @property
    def mean( self ):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance( self ):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev( self ):
        return self.variance.sqrt()
class ParameterProjection(nn.Module ):
    def __init__( self , in_features , args_dim , domain_map , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map

    def forward( self , x ):
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer(nn.Module ):
    def __init__( self , function ):
        super().__init__()
        self.function = function

    def forward( self , x , *args ):
        return self.function(x , *args )
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__( self , dim = 1 ):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution( self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )

    def distribution( self , distr_args , loc = None , scale = None , ):
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ):
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim( self ):
        return len(self.event_shape )

    @property
    def value_in_support( self ):
        return 0.0

    def get_parameter_projection( self , in_features ):
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def domain_map( self , *args ):
        raise NotImplementedError()

    @staticmethod
    def squareplus( x ):
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput(DistributionOutput ):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map( cls , df , loc , scale ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput(DistributionOutput ):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map( cls , loc , scale ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput(DistributionOutput ):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map( cls , total_count , logits ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def _base_distribution( self , distr_args ):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )

    def distribution( self , distr_args , loc = None , scale = None ):
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 113 | 0 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
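# Round-trip sketch (added for illustration): with key=3, encrypt("HELLO", 3) lays
# the text on three rails (H...O / E.L / ..L) and reads the rows, giving "HOELL";
# decrypt("HOELL", 3) restores "HELLO", and bruteforce("HOELL")[3] == "HELLO".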
| 701 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self ):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )

        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname ,IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
            json.dump(image_processor_map ,fp )

    def get_tokenizer( self ,**kwargs ):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**kwargs )

    def get_image_processor( self ,**kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname ,**kwargs )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        """Prepares a PIL image for the processor tests."""
        image_input = np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input ,0 ,-1 ) )
        return image_input
def __lowercase ( self : Any ):
'''simple docstring'''
_a : str = self.get_tokenizer()
_a : Optional[int] = self.get_image_processor()
_a : int = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : List[Any] = MgpstrProcessor.from_pretrained(self.tmpdirname ,use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_a )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Union[str, Any] = self.get_tokenizer()
_a : Any = self.get_image_processor()
_a : List[str] = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : int = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
_a : Dict = self.get_image_processor(do_normalize=_a ,padding_value=1.0 )
_a : Optional[int] = MgpstrProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=_a ,padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_a )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Dict = self.get_image_processor()
_a : int = self.get_tokenizer()
_a : int = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : Optional[int] = self.prepare_image_inputs()
_a : Optional[int] = image_processor(_a ,return_tensors='np' )
_a : str = processor(images=_a ,return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Optional[Any] = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : Dict = 'test'
_a : Optional[Any] = processor(text=_a )
_a : Any = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.get_image_processor()
_a : Any = self.get_tokenizer()
_a : List[str] = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : Optional[int] = 'test'
_a : Optional[Any] = self.prepare_image_inputs()
_a : Tuple = processor(text=_a ,images=_a )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_a : Any = processor.char_decode(_a )
_a : int = tokenizer.batch_decode(_a )
_a : List[Any] = [seq.replace(' ' ,'' ) for seq in decoded_tok]
self.assertListEqual(_a ,_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : int = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : List[str] = None
_a : int = self.prepare_image_inputs()
_a : List[str] = processor(text=_a ,images=_a )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Optional[Any] = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : Tuple = torch.randn(1 ,27 ,38 )
_a : Optional[int] = torch.randn(1 ,27 ,5_0257 )
_a : List[str] = torch.randn(1 ,27 ,3_0522 )
_a : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) ,['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
| 319 | 0 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __a :
def __init__( self , a__ , a__=2 , a__=3 , a__=4 , a__=2 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=36 , a__=3 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_12 , a__=16 , a__=2 , a__=0.02 , a__=6 , a__=6 , a__=3 , a__=4 , a__=None , a__=10_00 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = text_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = coordinate_size
_lowerCamelCase = shape_size
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
_lowerCamelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCamelCase = text_seq_length
_lowerCamelCase = (image_size // patch_size) ** 2 + 1
_lowerCamelCase = self.text_seq_length + self.image_seq_length
def snake_case_ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCamelCase = bbox[i, j, 3]
_lowerCamelCase = bbox[i, j, 1]
_lowerCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCamelCase = bbox[i, j, 2]
_lowerCamelCase = bbox[i, j, 0]
_lowerCamelCase = t
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowerCamelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = LayoutLMvaModel(config=a__ )
model.to(a__ )
model.eval()
# text + image
_lowerCamelCase = model(a__ , pixel_values=a__ )
_lowerCamelCase = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ )
_lowerCamelCase = model(a__ , bbox=a__ , pixel_values=a__ , token_type_ids=a__ )
_lowerCamelCase = model(a__ , bbox=a__ , pixel_values=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCamelCase = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCamelCase = model(pixel_values=a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = LayoutLMvaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = LayoutLMvaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = LayoutLMvaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __a ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : List[str] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Tuple = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def snake_case_ ( self ):
_lowerCamelCase = LayoutLMvaModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=a__ , hidden_size=37 )
def snake_case_ ( self , a__ , a__ , a__=False ):
_lowerCamelCase = copy.deepcopy(a__ )
if model_class in get_values(a__ ):
_lowerCamelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a__ ):
_lowerCamelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in get_values(a__ ):
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in [
*get_values(a__ ),
]:
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in [
*get_values(a__ ),
]:
_lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a__ , )
return inputs_dict
def snake_case_ ( self ):
self.config_tester.run_common_tests()
def snake_case_ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase = type
self.model_tester.create_and_check_model(*a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def snake_case_ ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = LayoutLMvaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
class __a ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ):
return LayoutLMvaImageProcessor(apply_ocr=a__ ) if is_vision_available() else None
@slow
def snake_case_ ( self ):
_lowerCamelCase = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(a__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values.to(a__ )
_lowerCamelCase = torch.tensor([[1, 2]] )
_lowerCamelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCamelCase = model(
input_ids=input_ids.to(a__ ) , bbox=bbox.to(a__ ) , pixel_values=pixel_values.to(a__ ) , )
# verify the logits
_lowerCamelCase = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , a__ )
_lowerCamelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-4 ) )
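# Note (added for illustration): LayoutLMv3 expects each bbox as (x0, y0, x1, y1)
# coordinates normalized to a 0-1000 scale, which is why the dummy boxes above use
# small integer values.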
| 650 |
"""simple docstring"""
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
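# Worked example (added for illustration): kth_permutation(10, 4) pops factorials
# [1, 2, 6] from the right: divmod(10, 6) -> (1, 4) picks 1; divmod(4, 2) -> (2, 0)
# picks 3; divmod(0, 1) -> (0, 0) picks 0; the leftover element is 2, so the
# result is [1, 3, 0, 2] -- the 0-indexed 10th permutation of 0..3.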
| 650 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any , __a : Dict , __a : Optional[int]=3 , __a : Optional[Any]=32 , __a : int=3 , __a : Tuple=10 , __a : List[Any]=[10, 20, 30, 40] , __a : Tuple=[1, 1, 2, 1] , __a : Optional[int]=True , __a : List[str]=True , __a : List[Any]="relu" , __a : Union[str, Any]=3 , __a : List[Any]=None , ) -> Dict:
_UpperCamelCase : int = parent
_UpperCamelCase : int = batch_size
_UpperCamelCase : Union[str, Any] = image_size
_UpperCamelCase : Union[str, Any] = num_channels
_UpperCamelCase : List[Any] = embeddings_size
_UpperCamelCase : List[Any] = hidden_sizes
_UpperCamelCase : Optional[Any] = depths
_UpperCamelCase : Dict = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : str = num_labels
_UpperCamelCase : List[Any] = scope
_UpperCamelCase : Tuple = len(__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Tuple = None
if self.use_labels:
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : List[Any] , __a : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RegNetModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : str = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Optional[int] , __a : Optional[Any] ) -> str:
_UpperCamelCase : Tuple = self.num_labels
_UpperCamelCase : Optional[int] = RegNetForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = self.prepare_config_and_inputs()
_UpperCamelCase : int = config_and_inputs
_UpperCamelCase : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ :Tuple = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = False
SCREAMING_SNAKE_CASE__ :Tuple = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
_UpperCamelCase : List[Any] = RegNetModelTester(self )
_UpperCamelCase : List[str] = ConfigTester(self , config_class=__a , has_text_modality=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Tuple = [*signature.parameters.keys()]
_UpperCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(config=__a )
for name, module in model.named_modules():
if isinstance(__a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
def check_hidden_states_output(__a : Union[str, Any] , __a : Any , __a : int ):
_UpperCamelCase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : int = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : int = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Tuple = layer_type
_UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Optional[int] = True
check_hidden_states_output(__a , __a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[Any] = RegNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
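        # Note: the (1, 1000) logit shape corresponds to the 1000 ImageNet-1k classes
        # that the pretrained RegNet checkpoint was trained on (added note; assumption
        # based on the standard checkpoint).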
| 713 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowerCamelCase__["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], lowerCamelCase__, module_spec=__spec__)
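# Note on the lazy-import pattern above (sketch of the intended behaviour, not new
# API): heavy submodules such as `modeling_owlvit` are only imported on first
# attribute access, e.g. `transformers.models.owlvit.OwlViTModel`; until then
# `_LazyModule` only holds the import-structure dict defined at the top.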
| 51 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
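        # e.g. with the toy defaults above (image_size=4, patch_size=2, text_seq_length=7):
        # image_seq_length = (4 // 2) ** 2 + 1 = 5, so seq_length = 7 + 5 = 12.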
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
'''simple docstring'''
return True
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
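            # e.g. an input of shape (batch, seq) is tiled to (batch, num_choices, seq)
            # so that every answer choice receives an identical copy (shapes illustrative).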
if return_labels:
if model_class in get_values(_A ):
                UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(_A ):
                UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(_A ):
                UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(_A ):
                UpperCAmelCase__ : int = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase_ ( self : int ):
'''simple docstring'''
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_A , _A , _A , _A , _A , _A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> List[str]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
        model = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''tf''' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
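        # Shape sanity check: 2 text tokens + (224 // 16) ** 2 + 1 = 2 + 197 = 199
        # sequence positions with hidden size 768, assuming layoutlmv3-base's default
        # 224x224 input resolution and 16x16 patches.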
| 75 |
from pathlib import Path
import fire
def minify( src_dir , dest_dir , n ):
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('''w''' ).write('''\n'''.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
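# Example invocation via python-fire (paths and line count are illustrative):
#   python minify.py --src_dir tests/fixtures/full --dest_dir tests/fixtures/mini --n 100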
| 74 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None:
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use VideoMAEImageProcessor instead.''' , FutureWarning , )
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 285 |
"""simple docstring"""
from typing import Any
def A_ ( input_list : list ):
"""simple docstring"""
if not input_list:
return []
    _a = [input_list.count(value ) for value in input_list]
    y = max(_a )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(_a ) if value == y} )
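# Illustrative behaviour (assumed examples): A_([1, 2, 2, 3, 3]) -> [2, 3]; A_([5]) -> [5]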
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 285 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key( state_dict : dict , old : str , new : str ) -> None:
    """simple docstring"""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict : dict ) -> OrderedDict:
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict : dict , is_panoptic : bool=False ) -> None:
    """simple docstring"""
    prefix = ''
    if is_panoptic:
        prefix = 'conditional_detr.'
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:2_56, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:2_56]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[2_56:5_12, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[2_56:5_12]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-2_56:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-2_56:]
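        # Layout of the fused in_proj tensors for hidden size 256: rows [0:256) hold
        # the query weights, [256:512) the keys and [512:768) the values, matching
        # the slices above.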
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE_ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name : str , pytorch_dump_folder_path : str ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE_ : int = 'resnet101'
if "dc5" in model_name:
SCREAMING_SNAKE_CASE_ : Dict = True
    is_panoptic = 'panoptic' in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE_ : Tuple = 2_50
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 91
SCREAMING_SNAKE_CASE_ : Dict = 'huggingface/label-files'
SCREAMING_SNAKE_CASE_ : int = 'coco-detection-id2label.json'
SCREAMING_SNAKE_CASE_ : int = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE_ : List[str] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : str = idalabel
SCREAMING_SNAKE_CASE_ : Any = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE_ : int = 'coco_panoptic' if is_panoptic else 'coco_detection'
SCREAMING_SNAKE_CASE_ : List[Any] = ConditionalDetrImageProcessor(format=lowerCamelCase_ )
# prepare image
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE_ : int = image_processor(images=lowerCamelCase_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : str = encoding['pixel_values']
logger.info(F'Converting model {model_name}...' )
# load original model from torch hub
    conditional_detr = torch.hub.load('DeppMeng/ConditionalDETR' , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE_ : int = 'conditional_detr.' + src
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = rename_backbone_keys(lowerCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase_ , is_panoptic=lowerCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE_ : List[str] = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
SCREAMING_SNAKE_CASE_ : Tuple = state_dict.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE_ : str = state_dict.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
SCREAMING_SNAKE_CASE_ : List[str] = state_dict.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
SCREAMING_SNAKE_CASE_ : Tuple = state_dict.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE_ : List[Any] = ConditionalDetrForSegmentation(lowerCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
model.push_to_hub(repo_id=lowerCamelCase_ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
SCREAMING_SNAKE_CASE_ : str = conditional_detr(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = model(lowerCamelCase_ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 )
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
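# Example invocation (script name and output path illustrative):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50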
| 105 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=8, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=99, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=36, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=None, ) -> str:
UpperCAmelCase_: int = parent
UpperCAmelCase_: Dict = batch_size
UpperCAmelCase_: Optional[int] = seq_length
UpperCAmelCase_: int = is_training
UpperCAmelCase_: List[Any] = use_input_mask
UpperCAmelCase_: int = use_token_type_ids
UpperCAmelCase_: Tuple = use_labels
UpperCAmelCase_: Tuple = vocab_size
UpperCAmelCase_: int = hidden_size
UpperCAmelCase_: List[str] = num_hidden_layers
UpperCAmelCase_: List[str] = num_attention_heads
UpperCAmelCase_: Any = intermediate_size
UpperCAmelCase_: str = hidden_act
UpperCAmelCase_: Optional[int] = hidden_dropout_prob
UpperCAmelCase_: Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_: List[str] = max_position_embeddings
UpperCAmelCase_: Optional[int] = type_vocab_size
UpperCAmelCase_: Tuple = type_sequence_label_size
UpperCAmelCase_: Tuple = initializer_range
UpperCAmelCase_: str = num_labels
UpperCAmelCase_: str = num_choices
UpperCAmelCase_: Optional[int] = scope
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase_: List[str] = None
if self.use_input_mask:
UpperCAmelCase_: Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_: List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_: str = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase_: str = None
UpperCAmelCase_: List[Any] = None
UpperCAmelCase_: Tuple = None
if self.use_labels:
UpperCAmelCase_: Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase_: str = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCAmelCase_: Tuple = ids_tensor([self.batch_size], self.num_choices )
UpperCAmelCase_: List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case (self ) -> List[Any]:
return MraConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=SCREAMING_SNAKE_CASE_, initializer_range=self.initializer_range, )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Optional[Any] = self.get_config()
UpperCAmelCase_: Dict = 300
return config
def __snake_case (self ) -> List[Any]:
        ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)): Any = self.prepare_config_and_inputs()
UpperCAmelCase_: List[str] = True
UpperCAmelCase_: Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_: List[str] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: Optional[int] = MraModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Optional[Any] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
UpperCAmelCase_: Any = True
UpperCAmelCase_: Union[str, Any] = MraModel(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Optional[Any] = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: List[Any] = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCAmelCase_: int = MraForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: int = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCAmelCase_: Any = MraForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: str = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, start_positions=SCREAMING_SNAKE_CASE_, end_positions=SCREAMING_SNAKE_CASE_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCAmelCase_: List[str] = self.num_labels
UpperCAmelCase_: Any = MraForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: List[str] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCAmelCase_: Tuple = self.num_labels
UpperCAmelCase_: Any = MraForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: List[str] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCAmelCase_: Optional[Any] = self.num_choices
UpperCAmelCase_: int = MraForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Dict = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
UpperCAmelCase_: List[str] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
UpperCAmelCase_: List[Any] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
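        # e.g. input_ids of shape (batch, seq) become (batch, num_choices, seq): the
        # same sequence is repeated once per answer choice before the forward pass.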
UpperCAmelCase_: List[Any] = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: int = self.prepare_config_and_inputs()
        ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)): Any = config_and_inputs
UpperCAmelCase_: Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _a ( _lowerCAmelCase , unittest.TestCase ):
A = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
A = False
A = False
A = False
A = False
A = ()
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: Dict = MraModelTester(self )
UpperCAmelCase_: List[Any] = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def __snake_case (self ) -> Tuple:
self.config_tester.run_common_tests()
def __snake_case (self ) -> str:
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_: str = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Dict:
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Any:
UpperCAmelCase_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> Any:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""MRA does not output attentions""" )
def __snake_case (self ) -> List[str]:
return
@require_torch
class _a ( unittest.TestCase ):
@slow
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: Union[str, Any] = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
UpperCAmelCase_: Optional[Any] = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_: Optional[Any] = model(SCREAMING_SNAKE_CASE_ )[0]
UpperCAmelCase_: Optional[int] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
@slow
def __snake_case (self ) -> Any:
UpperCAmelCase_: List[str] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
UpperCAmelCase_: Dict = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_: List[str] = model(SCREAMING_SNAKE_CASE_ )[0]
UpperCAmelCase_: Any = 50265
UpperCAmelCase_: Any = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
@slow
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Optional[int] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
UpperCAmelCase_: List[Any] = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_: List[Any] = model(SCREAMING_SNAKE_CASE_ )[0]
UpperCAmelCase_: Dict = 50265
UpperCAmelCase_: int = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
| 556 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class UpperCamelCase :
_SCREAMING_SNAKE_CASE : CommonSchedulerState
# setable values
_SCREAMING_SNAKE_CASE : jnp.ndarray
_SCREAMING_SNAKE_CASE : jnp.ndarray
_SCREAMING_SNAKE_CASE : Optional[int] = None
@classmethod
def __snake_case ( cls :Optional[int] , __magic_name__ :CommonSchedulerState , __magic_name__ :jnp.ndarray , __magic_name__ :jnp.ndarray ) ->Optional[int]:
return cls(common=__magic_name__ , init_noise_sigma=__magic_name__ , timesteps=__magic_name__ )
@dataclass
class UpperCamelCase (__snake_case ):
_SCREAMING_SNAKE_CASE : DDPMSchedulerState
class UpperCamelCase (__snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : List[Any] = [e.name for e in FlaxKarrasDiffusionSchedulers]
_SCREAMING_SNAKE_CASE : jnp.dtype
@property
def __snake_case ( self :int ) ->List[Any]:
return True
@register_to_config
    def __init__( self , num_train_timesteps : int = 1_000 , beta_start : float = 0.0001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[jnp.ndarray] = None , variance_type : str = "fixed_small" , clip_sample : bool = True , prediction_type : str = "epsilon" , dtype : jnp.dtype = jnp.float32 , ) -> None:
        self.dtype = dtype
def __snake_case ( self :Union[str, Any] , __magic_name__ :Optional[CommonSchedulerState] = None ) ->DDPMSchedulerState:
if common is None:
lowercase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase : Optional[Any] = jnp.array(1.0 , dtype=self.dtype )
lowercase : Union[str, Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__magic_name__ , init_noise_sigma=__magic_name__ , timesteps=__magic_name__ , )
def __snake_case ( self :Dict , __magic_name__ :DDPMSchedulerState , __magic_name__ :jnp.ndarray , __magic_name__ :Optional[int] = None ) ->jnp.ndarray:
return sample
def __snake_case ( self :Optional[int] , __magic_name__ :DDPMSchedulerState , __magic_name__ :int , __magic_name__ :Tuple = () ) ->DDPMSchedulerState:
lowercase : Tuple = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase : Union[str, Any] = (jnp.arange(0 , __magic_name__ ) * step_ratio).round()[::-1]
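        # e.g. num_train_timesteps=1000 with num_inference_steps=50 gives step_ratio=20
        # and timesteps = [980, 960, ..., 20, 0] (illustrative values).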
return state.replace(
num_inference_steps=__magic_name__ , timesteps=__magic_name__ , )
def __snake_case ( self :Dict , __magic_name__ :DDPMSchedulerState , __magic_name__ :Union[str, Any] , __magic_name__ :List[Any]=None , __magic_name__ :Any=None ) ->Tuple:
lowercase : Any = state.common.alphas_cumprod[t]
lowercase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
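        # i.e. variance_t = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t),
        # Eq. (7) of Ho et al., 2020 (https://arxiv.org/pdf/2006.11239.pdf).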
if variance_type is None:
lowercase : Optional[int] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase : Optional[int] = jnp.clip(__magic_name__ , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase : Union[str, Any] = jnp.log(jnp.clip(__magic_name__ , a_min=1E-20 ) )
elif variance_type == "fixed_large":
lowercase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase : int = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase : Union[str, Any] = variance
lowercase : str = state.common.betas[t]
lowercase : Dict = (predicted_variance + 1) / 2
lowercase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def __snake_case ( self :Optional[Any] , __magic_name__ :DDPMSchedulerState , __magic_name__ :jnp.ndarray , __magic_name__ :int , __magic_name__ :jnp.ndarray , __magic_name__ :Optional[jax.random.KeyArray] = None , __magic_name__ :bool = True , ) ->Union[FlaxDDPMSchedulerOutput, Tuple]:
lowercase : Tuple = timestep
if key is None:
lowercase : Any = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase : Optional[int] = jnp.split(__magic_name__ , sample.shape[1] , axis=1 )
else:
lowercase : int = None
# 1. compute alphas, betas
lowercase : Dict = state.common.alphas_cumprod[t]
lowercase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase : List[Any] = 1 - alpha_prod_t
lowercase : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase : List[str] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase : Tuple = jnp.clip(__magic_name__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase : str = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase : Any = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase : Any = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase : Union[str, Any] = jax.random.split(__magic_name__ , num=1 )
lowercase : Union[str, Any] = jax.random.normal(__magic_name__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__magic_name__ , __magic_name__ , predicted_variance=__magic_name__ ) ** 0.5) * noise
lowercase : str = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__magic_name__ , state=__magic_name__ )
def __snake_case ( self :int , __magic_name__ :DDPMSchedulerState , __magic_name__ :jnp.ndarray , __magic_name__ :jnp.ndarray , __magic_name__ :jnp.ndarray , ) ->jnp.ndarray:
return add_noise_common(state.common , __magic_name__ , __magic_name__ , __magic_name__ )
def __snake_case ( self :Any , __magic_name__ :DDPMSchedulerState , __magic_name__ :jnp.ndarray , __magic_name__ :jnp.ndarray , __magic_name__ :jnp.ndarray , ) ->jnp.ndarray:
return get_velocity_common(state.common , __magic_name__ , __magic_name__ , __magic_name__ )
def __len__( self :Dict ) ->Union[str, Any]:
return self.config.num_train_timesteps
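# A minimal, self-contained sketch of the variance logic above, on a toy linear
# beta schedule. The schedule values and the predicted_variance stand-in are
# illustrative only, not the scheduler's defaults.
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 0.02, 1000)
alphas_cumprod = jnp.cumprod(1.0 - betas)

t = 10
alpha_prod_t = alphas_cumprod[t]
alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else jnp.array(1.0)

# Posterior variance, formula (7) of https://arxiv.org/pdf/2006.11239.pdf
variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * betas[t]

# "learned_range" interpolates between this variance and beta_t, steered by the
# model's extra output channel mapped from [-1, 1] to a fraction in [0, 1].
predicted_variance = 0.0  # stand-in for a model output channel
frac = (predicted_variance + 1) / 2
interpolated = frac * betas[t] + (1 - frac) * variance
print(float(variance), float(interpolated))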
| 704 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
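# A small sketch of what fix_query_key_value_ordering does for checkpoint
# version >= 2.0: it regroups rows from [heads, splits, hidden] order to
# [splits, heads, hidden] order while keeping the storage shape. Toy sizes only.
num_heads, num_splits, hidden_size = 2, 3, 4
toy_param = torch.arange(num_heads * num_splits * hidden_size * 5, dtype=torch.float32).view(-1, 5)
reordered = fix_query_key_value_ordering(toy_param, 2.0, num_splits, num_heads, hidden_size)
assert reordered.shape == toy_param.shape  # same shape, permuted row blocks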
def convert_megatron_checkpoint(args, input_state_dict, config):
# The converted output model.
lowercase : str = {}
# old versions did not store training args
lowercase : Optional[int] = input_state_dict.get("""args""" , _A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowercase : List[Any] = ds_args.padded_vocab_size
lowercase : int = ds_args.max_position_embeddings
lowercase : Optional[Any] = ds_args.hidden_size
lowercase : int = ds_args.num_layers
lowercase : Union[str, Any] = ds_args.num_attention_heads
lowercase : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowercase : int = config.n_head
# The hidden_size per head.
lowercase : Union[str, Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowercase : List[str] = input_state_dict["""checkpoint_version"""]
else:
lowercase : List[str] = 0.0
# The model.
lowercase : Tuple = input_state_dict["""model"""]
# The language model.
lowercase : Optional[int] = model["""language_model"""]
# The embeddings.
lowercase : Optional[int] = lm["""embedding"""]
# The word embeddings.
lowercase : Union[str, Any] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
lowercase : Tuple = word_embeddings[: config.vocab_size, :]
lowercase : Tuple = word_embeddings
# The position embeddings.
lowercase : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowercase : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
lowercase : Optional[int] = pos_embeddings
# The transformer.
lowercase : str = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
lowercase : str = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
lowercase : Optional[Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowercase : int = layer_re.match(_A )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowercase : Optional[int] = int(m.group(1 ) )
# The name of the operation.
lowercase : Union[str, Any] = m.group(2 )
# Is it a weight or a bias?
lowercase : Dict = m.group(3 )
# The name of the layer.
lowercase : List[Any] = F"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
lowercase : List[str] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
lowercase : Dict = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowercase : Optional[int] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _A , _A )
lowercase : List[str] = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowercase : str = torch.tensor(-1e4 , dtype=torch.floataa )
lowercase : Tuple = masked_bias
lowercase : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowercase : int = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowercase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowercase : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Store. No change of shape.
lowercase : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowercase : Optional[int] = megatron_to_transformers[op_name]
lowercase : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowercase : Union[str, Any] = megatron_to_transformers[op_name]
lowercase : str = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowercase : Dict = transformer["""final_layernorm.weight"""]
lowercase : Any = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
lowercase : int = word_embeddings
# It should be done!
return output_state_dict
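# A quick sketch of how the layer regex above decomposes a Megatron parameter
# name. The example name is hypothetical but follows the Megatron layout.
_demo_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
_demo_m = _demo_re.match("layers.11.mlp.dense_h_to_4h.weight")
print(int(_demo_m.group(1)), _demo_m.group(2), _demo_m.group(3))
# -> 11 mlp.dense_h_to_4h weight; via the megatron_to_transformers map this row
#    would land in "transformer.h.11.mlp.c_fc.weight" (transposed, as a weight).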
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)", )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.", )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50_257, n_positions=1_024, n_embd=1_024, n_layer=24, n_head=16, n_inner=4_096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 348 | 0 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
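# Hypothetical usage of the helpers above (torch is assumed available here; the
# module itself relies on duck typing and does not import it):
import torch

sample = torch.rand(2, 3, 64, 64) * 2 - 1  # two fake 3x64x64 "model outputs" in [-1, 1]
pil_images = pt_to_pil(sample)
print(len(pil_images), pil_images[0].size)  # 2 (64, 64)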
| 22 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowercase_ = """src/transformers"""
# Matches is_xxx_available()
lowercase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowercase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowercase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowercase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase_ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase_ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowercase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowercase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowercase_ = re.compile(R"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
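# A tiny sketch of analyze_results on hand-made inputs (the object names are
# hypothetical):
for error in analyze_results(
    {"none": ["BertModel"], "torch": ["BertForMaskedLM"]},
    {"none": ["BertModel"], "torch": []},
):
    print(error)
# Differences for torch backend:
#   BertForMaskedLM in _import_structure but not in TYPE_HINT.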
def check_all_inits():
    """Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
    both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 235 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
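# For instance, a 2x4 batch of random floats scaled to [0, 0.5):
_demo = floats_list((2, 4), scale=0.5)
print(len(_demo), len(_demo[0]))  # 2 4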
@require_torch
class __magic_name__ ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=4_00 , snake_case_=20_00 , snake_case_=1 , snake_case_=0.0 , snake_case_=1_60_00 , snake_case_=True , snake_case_=80 , snake_case_=16 , snake_case_=64 , snake_case_="hann_window" , snake_case_=80 , snake_case_=76_00 , snake_case_=1E-10 , snake_case_=True , ):
lowercase =parent
lowercase =batch_size
lowercase =min_seq_length
lowercase =max_seq_length
lowercase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase =feature_size
lowercase =padding_value
lowercase =sampling_rate
lowercase =do_normalize
lowercase =num_mel_bins
lowercase =hop_length
lowercase =win_length
lowercase =win_function
lowercase =fmin
lowercase =fmax
lowercase =mel_floor
lowercase =return_attention_mask
def _A( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _A( self , snake_case_=False , snake_case_=False ):
def _flatten(snake_case_ ):
return list(itertools.chain(*a_ ) )
if equal_length:
lowercase =floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowercase =[
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase =[np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
def _A( self , snake_case_=False , snake_case_=False ):
if equal_length:
lowercase =[floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase =[
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase =[np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __magic_name__ ( __a , unittest.TestCase ):
UpperCamelCase__ = SpeechTaFeatureExtractor
def _A( self ):
lowercase =SpeechTaFeatureExtractionTester(self )
def _A( self , snake_case_ ):
self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1E-3 ) )
def _A( self ):
lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase =[floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowercase =[np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
lowercase =feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
lowercase =feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1E-3 ) )
# Test batched
lowercase =feat_extract(a_ , return_tensors='''np''' ).input_values
lowercase =feat_extract(a_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1E-3 ) )
def _A( self ):
lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase =[floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowercase =["""longest""", """max_length""", """do_not_pad"""]
lowercase =[None, 16_00, None]
for max_length, padding in zip(a_ , a_ ):
lowercase =feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='''np''' )
lowercase =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def _A( self ):
lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase =range(8_00 , 14_00 , 2_00 )
lowercase =[floats_list((1, x) )[0] for x in lengths]
lowercase =["""longest""", """max_length""", """do_not_pad"""]
lowercase =[None, 16_00, None]
for max_length, padding in zip(a_ , a_ ):
lowercase =feat_extract(a_ , max_length=a_ , padding=a_ )
lowercase =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def _A( self ):
lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase =[floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowercase =feat_extract(
a_ , truncation=a_ , max_length=10_00 , padding='''max_length''' , return_tensors='''np''' )
lowercase =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _A( self ):
lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase =[floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowercase =feat_extract(
a_ , truncation=a_ , max_length=10_00 , padding='''longest''' , return_tensors='''np''' )
lowercase =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
lowercase =[floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowercase =feat_extract(
a_ , truncation=a_ , max_length=20_00 , padding='''longest''' , return_tensors='''np''' )
lowercase =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def _A( self ):
lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase =np.random.rand(1_00 ).astype(np.floataa )
lowercase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase =feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowercase =feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _A( self ):
lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase =[floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowercase =[np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
lowercase =feature_extractor(audio_target=a_ , padding=a_ , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowercase =feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
lowercase =feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1E-3 ) )
# Test batched
lowercase =feature_extractor(a_ , return_tensors='''np''' ).input_values
lowercase =feature_extractor(a_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase =[floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowercase =np.asarray(a_ )
lowercase =feature_extractor(a_ , return_tensors='''np''' ).input_values
lowercase =feature_extractor(a_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1E-3 ) )
def _A( self ):
lowercase =self.feat_extract_tester.prepare_inputs_for_target()
lowercase =self.feature_extraction_class(**self.feat_extract_dict )
lowercase =feat_extract.model_input_names[0]
lowercase =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) )
lowercase =self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
lowercase =BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
lowercase =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _A( self ):
lowercase =self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
lowercase =self.feature_extraction_class(**self.feat_extract_dict )
lowercase =feat_extract.model_input_names[0]
lowercase =BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
lowercase =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _A( self ):
lowercase =self.feature_extraction_class(**self.feat_extract_dict )
lowercase =self.feat_extract_tester.prepare_inputs_for_target()
lowercase =feat_extract.model_input_names[0]
lowercase =BatchFeature({input_name: speech_inputs} )
lowercase =feat_extract.num_mel_bins # hack!
lowercase =feat_extract.pad(a_ , padding='''longest''' , return_tensors='''np''' )[input_name]
lowercase =feat_extract.pad(a_ , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _A( self ):
lowercase =self.feat_extract_dict
lowercase =True
lowercase =self.feature_extraction_class(**a_ )
lowercase =self.feat_extract_tester.prepare_inputs_for_target()
lowercase =[len(a_ ) for x in speech_inputs]
lowercase =feat_extract.model_input_names[0]
lowercase =BatchFeature({input_name: speech_inputs} )
lowercase =feat_extract.num_mel_bins # hack!
lowercase =feat_extract.pad(a_ , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , a_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ )
def _A( self ):
lowercase =self.feat_extract_dict
lowercase =True
lowercase =self.feature_extraction_class(**a_ )
lowercase =self.feat_extract_tester.prepare_inputs_for_target()
lowercase =[len(a_ ) for x in speech_inputs]
lowercase =feat_extract.model_input_names[0]
lowercase =BatchFeature({input_name: speech_inputs} )
lowercase =min(a_ )
lowercase =feat_extract.num_mel_bins # hack!
lowercase =feat_extract.pad(
a_ , padding='''max_length''' , max_length=a_ , truncation=a_ , return_tensors='''np''' )
self.assertIn('''attention_mask''' , a_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _A( self , snake_case_ ):
from datasets import load_dataset
lowercase =load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
lowercase =ds.sort('''id''' ).select(range(a_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def _A( self ):
lowercase =torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
lowercase =self._load_datasamples(1 )
lowercase =SpeechTaFeatureExtractor()
lowercase =feature_extractor(a_ , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 9_36_80) )
self.assertTrue(torch.allclose(input_values[0, :30] , a_ , atol=1E-6 ) )
def _A( self ):
lowercase =torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
lowercase =self._load_datasamples(1 )
lowercase =SpeechTaFeatureExtractor()
lowercase =feature_extractor(audio_target=a_ , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 3_66, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1E-4 ) )
| 706 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Normalize a (left, top, right, bottom) pixel box to the 0-1000 coordinate grid."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
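# Example: a 100x40 pixel box at (10, 20) on a 200x400 page maps onto the
# 0-1000 grid used by LayoutLM-style models:
print(normalize_box([10, 20, 110, 60], 200, 400))  # [50, 50, 550, 150]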
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = ['pixel_values']
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = True , snake_case_ = None , snake_case_ = "" , **snake_case_ , ):
super().__init__(**snake_case_ )
lowercase =size if size is not None else {'''height''': 2_24, '''width''': 2_24}
lowercase =get_size_dict(snake_case_ )
lowercase =do_resize
lowercase =size
lowercase =resample
lowercase =apply_ocr
lowercase =ocr_lang
lowercase =tesseract_config
def _A( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = None , **snake_case_ , ):
lowercase =get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase =(size['''height'''], size['''width'''])
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ):
lowercase =do_resize if do_resize is not None else self.do_resize
lowercase =size if size is not None else self.size
lowercase =get_size_dict(snake_case_ )
lowercase =resample if resample is not None else self.resample
lowercase =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase =make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
lowercase =[to_numpy_array(snake_case_ ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase =[]
lowercase =[]
for image in images:
lowercase , lowercase =apply_tesseract(snake_case_ , snake_case_ , snake_case_ )
words_batch.append(snake_case_ )
boxes_batch.append(snake_case_ )
if do_resize:
lowercase =[self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase =[flip_channel_order(snake_case_ ) for image in images]
lowercase =[to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
lowercase =BatchFeature(data={'''pixel_values''': images} , tensor_type=snake_case_ )
if apply_ocr:
lowercase =words_batch
lowercase =boxes_batch
return data
| 145 | 0 |
def solution(limit: int = 50_000_000) -> int:
    """
    Project Euler 87: count the numbers below `limit` expressible as the sum of a
    prime square, a prime cube and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
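# Sanity check: 28 = 2**2 + 2**3 + 2**4 is the only such number below 29.
assert solution(29) == 1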
if __name__ == "__main__":
print(f'''{solution() = }''')
| 469 |
def is_unique(input_str: str) -> bool:
    """
    Check whether all characters of a string are unique, using a bitmap over code points.

    >>> is_unique("abcde")
    True
    >>> is_unique("abcda")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 469 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : List[str] = '''van'''
def __init__( self : str , UpperCAmelCase_ : Any=224 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : str=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[64, 128, 320, 512] , UpperCAmelCase_ : Union[str, Any]=[3, 3, 12, 3] , UpperCAmelCase_ : Any=[8, 8, 4, 4] , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[int]=1e-6 , UpperCAmelCase_ : Optional[Any]=1e-2 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : str=0.0 , **UpperCAmelCase_ : List[str] , )-> Union[str, Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = patch_sizes
UpperCamelCase = strides
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = mlp_ratios
UpperCamelCase = hidden_act
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = layer_scale_init_value
UpperCamelCase = drop_path_rate
UpperCamelCase = dropout_rate
| 556 |
"""simple docstring"""
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ = False )-> bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
UpperCamelCase = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
UpperCamelCase = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCAmelCase_ , 1 ):
if n < _p:
# then we have our last prime to check
UpperCamelCase = primes[:idx]
break
UpperCamelCase , UpperCamelCase = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
UpperCamelCase = False
for r in range(UpperCAmelCase_ ):
UpperCamelCase = pow(UpperCAmelCase_ , d * 2**r , UpperCAmelCase_ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
UpperCamelCase = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
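# The decomposition used above, spelled out: n - 1 = d * 2**s with d odd.
# For the Carmichael number 561: 560 = 35 * 2**4, and the test rejects it,
# while the prime 563 passes.
assert miller_rabin(563)
assert not miller_rabin(561)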
def test_miller_rabin() -> None:
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite and prime in each range."""
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 556 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
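# Sketch with a toy metric (in the real script, exact_match_score / f1_score
# come from utils_rag):
print(metric_max_over_ground_truths(
    lambda pred, gt: float(pred.lower() == gt.lower()), "Paris", ["paris", "Paris, France"]
))  # 1.0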
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=A_ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=A_ , choices=["exact", "compressed", "legacy"] , type=A_ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=A_ , type=A_ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=A_ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=A_ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=A_ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=A_ , type=A_ , required=A_ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=A_ , type=A_ , required=A_ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=A_ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=A_ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=A_ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=A_ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=A_ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=A_ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
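# Illustrative invocation (the script name, paths and model id below are
# placeholders, not taken from this file):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold.tsv \
#       --predictions_path preds.txt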
def lowerCamelCase__ ( A_ ):
UpperCAmelCase_ = {}
if args.model_type is None:
UpperCAmelCase_ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
UpperCAmelCase_ = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
UpperCAmelCase_ = args.n_docs
if args.index_name is not None:
UpperCAmelCase_ = args.index_name
if args.index_path is not None:
UpperCAmelCase_ = args.index_path
else:
UpperCAmelCase_ = BartForConditionalGeneration
UpperCAmelCase_ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , A_ )
UpperCAmelCase_ = get_scores if args.eval_mode == "e2e" else get_precision_at_k
UpperCAmelCase_ = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(A_ , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(A_ ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
UpperCAmelCase_ = RagRetriever.from_pretrained(A_ , **A_ )
UpperCAmelCase_ = model_class.from_pretrained(A_ , retriever=A_ , **A_ )
model.retriever.init_retrieval()
else:
UpperCAmelCase_ = model_class.from_pretrained(A_ , **A_ )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
UpperCAmelCase_ = []
for line in tqdm(A_ ):
questions.append(line.strip() )
if len(A_ ) == args.eval_batch_size:
UpperCAmelCase_ = evaluate_batch_fn(A_ , A_ , A_ )
preds_file.write("\n".join(A_ ) + "\n" )
preds_file.flush()
UpperCAmelCase_ = []
if len(A_ ) > 0:
UpperCAmelCase_ = evaluate_batch_fn(A_ , A_ , A_ )
preds_file.write("\n".join(A_ ) )
preds_file.flush()
score_fn(A_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__snake_case : int = get_args()
main(args)
| 660 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case : Union[str, Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def lowerCamelCase__ ( ):
UpperCAmelCase_ = Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase_ = g.get_repo("huggingface/diffusers" )
UpperCAmelCase_ = repo.get_issues(state="open" )
for issue in open_issues:
UpperCAmelCase_ = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=A_ )
UpperCAmelCase_ = comments[0] if len(A_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
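# Illustrative summary of the lifecycle implemented above:
#   * >23 days idle (and >=30 days old) -> post the stale notice, add "stale"
#   * >7 further days with no human reply -> close the issue
#   * a non-bot comment on a stale issue -> reopen it and drop the label
# Issues carrying a label in LABELS_TO_EXEMPT are never marked stale or closed.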
| 660 | 1 |
from __future__ import annotations
lowercase : Any = 1.6021e-19 # units = C
def A_ ( A__ , A__ , A__ , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
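# Worked example (illustrative values, not from this file): solving
# sigma = q * n * mu for the mobility, with sigma = 1000 S/m and n = 1e20 m^-3:
#   mu = 1000.0 / (1e20 * 1.6021e-19) ~= 62.42 m^2 / (V * s)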
| 392 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def A_ ( A__ ) -> float:
return np.dot(A__ , A__ )
class A__ :
"""simple docstring"""
def __init__( self , *,
lowercase = np.inf , lowercase = "linear" , lowercase = 0.0 , ) -> None:
'''simple docstring'''
a__ : int = regularization
a__ : int = gamma
if kernel == "linear":
a__ : Optional[Any] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
a__ : Union[str, Any] = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
a__ : List[str] = F'Unknown kernel: {kernel}'
raise ValueError(lowercase)
def __lowercase ( self , lowercase , lowercase) -> float:
'''simple docstring'''
return np.dot(lowercase , lowercase)
def __lowercase ( self , lowercase , lowercase) -> float:
'''simple docstring'''
return np.exp(-(self.gamma * norm_squared(vectora - vectora)))
def __lowercase ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__ : Tuple = observations
a__ : int = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((a__) , ) : Tuple = np.shape(lowercase)
def to_minimize(lowercase) -> float:
a__ : Tuple = 0
((a__) , ) : Optional[Any] = np.shape(lowercase)
for i in range(lowercase):
for j in range(lowercase):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(lowercase)
a__ : str = LinearConstraint(lowercase , 0 , 0)
a__ : List[Any] = Bounds(0 , self.regularization)
a__ : Optional[int] = minimize(
lowercase , np.ones(lowercase) , bounds=lowercase , constraints=[ly_contraint]).x
a__ : str = l_star
# calculating mean offset of separation plane to points
a__ : Optional[int] = 0
for i in range(lowercase):
for j in range(lowercase):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
a__ : str = s / n
def __lowercase ( self , lowercase) -> int:
'''simple docstring'''
a__ : int = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , lowercase)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
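# Minimal illustrative sketch (added for clarity; `gamma` is an assumed value)
# of the two kernels defined above, evaluated on a toy pair of vectors:
def _demo_kernels() -> None:
    gamma = 0.5
    x, y = np.array([1.0, 0.0]), np.array([0.0, 1.0])
    print(np.dot(x, y))  # linear kernel -> 0.0
    print(np.exp(-gamma * np.dot(x - y, x - y)))  # rbf kernel -> ~0.3679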
if __name__ == "__main__":
import doctest
doctest.testmod()
| 392 | 1 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''AutoImageProcessor'''
_lowerCamelCase = '''AutoTokenizer'''
def __init__( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,**lowerCamelCase_ ) -> Union[str, Any]:
A = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,__a ,)
A = kwargs.pop("""feature_extractor""" )
A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__a ,__a )
A = self.image_processor
A = False
def __call__( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__a ,**__a )
A = kwargs.pop("""images""" ,__a )
A = kwargs.pop("""text""" ,__a )
if len(__a ) > 0:
A = args[0]
A = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
A = self.image_processor(__a ,*__a ,**__a )
if text is not None:
A = self.tokenizer(__a ,**__a )
if text is None:
return inputs
elif images is None:
return encodings
else:
A = encodings['input_ids']
return inputs
def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> List[str]:
return self.tokenizer.batch_decode(*__a ,**__a )
def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> List[Any]:
return self.tokenizer.decode(*__a ,**__a )
@contextmanager
def UpperCamelCase__ ( self ) -> List[Any]:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
A = True
A = self.tokenizer
yield
A = self.image_processor
A = False
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=False ,lowerCamelCase_=None ) -> List[str]:
if added_vocab is None:
A = self.tokenizer.get_added_vocab()
A = {}
while tokens:
A = re.search(r"""<s_(.*?)>""" ,__a ,re.IGNORECASE )
if start_token is None:
break
A = start_token.group(1 )
A = re.search(rf'</s_{key}>' ,__a ,re.IGNORECASE )
A = start_token.group()
if end_token is None:
A = tokens.replace(__a ,"""""" )
else:
A = end_token.group()
A = re.escape(__a )
A = re.escape(__a )
A = re.search(f'{start_token_escaped}(.*?){end_token_escaped}' ,__a ,re.IGNORECASE )
if content is not None:
A = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
A = self.tokenajson(__a ,is_inner_value=__a ,added_vocab=__a )
if value:
if len(__a ) == 1:
A = value[0]
A = value
else: # leaf nodes
A = []
for leaf in content.split(r"""<sep/>""" ):
A = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
A = leaf[1:-2] # for categorical special tokens
output[key].append(__a )
if len(output[key] ) == 1:
A = output[key][0]
A = tokens[tokens.find(__a ) + len(__a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] ,is_inner_value=__a ,added_vocab=__a )
if len(__a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
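# Illustrative note: the parser above turns a token sequence such as
# "<s_menu><s_name>latte</s_name></s_menu>" into {"menu": {"name": "latte"}}
# by recursively pairing <s_key> ... </s_key> tags and splitting leaf values
# on <sep/>.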
@property
def UpperCamelCase__ ( self ) -> Any:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,__a ,)
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,__a ,)
return self.image_processor
| 617 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case__ ( unittest.TestCase ):
def A_ ( self : int , __a : Optional[int] , __a : Union[str, Any] ) -> str:
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : List[Any] , __a : Union[str, Any]=0 , __a : List[str]=(4, 4, 64, 64) , __a : Optional[Any]=False ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
__snake_case : List[Any] = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def A_ ( self : Any , __a : Any=False , __a : Dict="CompVis/stable-diffusion-v1-4" ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = jnp.bfloataa if fpaa else jnp.floataa
__snake_case : int = 'bf16' if fpaa else None
__snake_case , __snake_case : Union[str, Any] = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='unet' , dtype=__a , revision=__a )
return model, params
def A_ ( self : Any , __a : Dict=0 , __a : Dict=(4, 77, 768) , __a : List[str]=False ) -> List[Any]:
'''simple docstring'''
__snake_case : List[Any] = jnp.bfloataa if fpaa else jnp.floataa
__snake_case : Any = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def A_ ( self : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : Any ) -> Dict:
'''simple docstring'''
__snake_case , __snake_case : Tuple = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=__a )
__snake_case : Tuple = self.get_latents(__a , fpaa=__a )
__snake_case : int = self.get_encoder_hidden_states(__a , fpaa=__a )
__snake_case : List[str] = model.apply(
{'params': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__snake_case : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__snake_case : str = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def A_ ( self : str , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Optional[Any]:
'''simple docstring'''
__snake_case , __snake_case : int = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=__a )
__snake_case : int = self.get_latents(__a , shape=(4, 4, 96, 96) , fpaa=__a )
__snake_case : Optional[Any] = self.get_encoder_hidden_states(__a , shape=(4, 77, 1024) , fpaa=__a )
__snake_case : List[str] = model.apply(
{'params': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__snake_case : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__snake_case : int = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
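# Illustrative note: per the comments above, the hard-coded expected slices
# come from float16 torch reference runs, which is why the bfloat16 flax
# outputs are only required to match within atol=1e-2.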
| 286 | 0 |
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> float:
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> float:
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> float:
'''simple docstring'''
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return compound_interest(
lowerCAmelCase_ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
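# Worked example (illustrative): the compound-interest helper above returns the
# interest earned rather than the final balance. With a principal of 10_000, a
# 5% per-period rate and 3 compounding periods:
#   10_000 * ((1 + 0.05) ** 3 - 1) == 1_576.25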
if __name__ == "__main__":
import doctest
doctest.testmod()
| 476 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
def a__ ( self , lowerCAmelCase__ ):
_A= 3
_A= 250
_A= ids_tensor((batch_size, length) , lowerCAmelCase__ )
_A= torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length
return input_ids, scores
def a__ ( self ):
_A, _A= self._get_tensors(5 )
_A= StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_A, _A= self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_A, _A= self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def a__ ( self ):
_A= MaxLengthCriteria(max_length=10 )
_A, _A= self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_A, _A= self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_A, _A= self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
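# Illustrative note: a criterion's __call__ returns True once generation should
# halt, so the assertions above flip from False to True exactly when input_ids
# reaches the configured maximum length.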
def a__ ( self ):
_A= MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_A, _A= self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_A, _A= self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_A, _A= self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_A= StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__ ( self ):
_A, _A= self._get_tensors(5 )
_A= MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_A= MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def a__ ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
_A= validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
| 476 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = ['input_ids', 'attention_mask']
def __init__( self :Tuple , a :List[Any]="</s>" , a :str="<unk>" , a :int="<pad>" , a :Optional[int]=1_2_5 , a :List[Any]=None , **a :Any , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__UpperCamelCase : Any = [f'<extra_id_{i}>' for i in range(a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__UpperCamelCase : str = len(set(filter(lambda a : bool("extra_id" in str(a ) ) , a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
__UpperCamelCase : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
__UpperCamelCase : Tuple = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
__UpperCamelCase : int = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
super().__init__(
eos_token=a , unk_token=a , pad_token=a , extra_ids=a , additional_special_tokens=a , **a , )
__UpperCamelCase : List[str] = extra_ids
__UpperCamelCase : Optional[Any] = 2**8 # utf is 8 bits
# define special tokens dict
__UpperCamelCase : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__UpperCamelCase : List[Any] = len(self.special_tokens_encoder )
__UpperCamelCase : int = len(a )
for i, token in enumerate(a ):
__UpperCamelCase : Tuple = self.vocab_size + i - n
__UpperCamelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def _lowerCamelCase ( self :Optional[int] ) -> str:
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
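# Illustrative note: with the default extra_ids=125 the id space works out to
#   0..2     -> pad / eos / unk special tokens,
#   3..258   -> the 256 raw utf-8 byte values (offset by the 3 specials),
#   259..383 -> the <extra_id_*> sentinels,
# so the property above returns 256 + 3 + 125 == 384.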
def _lowerCamelCase ( self :Tuple , a :List[int] , a :Optional[List[int]] = None , a :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a )) + [1]
return ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self :List[Any] , a :List[int] ) -> List[int]:
if len(a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _lowerCamelCase ( self :Optional[Any] , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : List[str] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self :Any , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : Union[str, Any] = self._add_eos_if_not_present(a )
if token_ids_a is None:
return token_ids_a
else:
__UpperCamelCase : Optional[Any] = self._add_eos_if_not_present(a )
return token_ids_a + token_ids_a
def _lowerCamelCase ( self :int , a :str ) -> List[str]:
__UpperCamelCase : str = [chr(i ) for i in text.encode("utf-8" )]
return tokens
def _lowerCamelCase ( self :Dict , a :Dict ) -> List[str]:
if token in self.special_tokens_encoder:
__UpperCamelCase : Optional[int] = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__UpperCamelCase : int = self.added_tokens_encoder[token]
elif len(a ) != 1:
__UpperCamelCase : Any = self.unk_token_id
else:
__UpperCamelCase : Tuple = ord(a ) + self._num_special_tokens
return token_id
def _lowerCamelCase ( self :int , a :int ) -> int:
if index in self.special_tokens_decoder:
__UpperCamelCase : List[str] = self.special_tokens_decoder[index]
else:
__UpperCamelCase : Optional[Any] = chr(index - self._num_special_tokens )
return token
def _lowerCamelCase ( self :str , a :Dict ) -> List[str]:
__UpperCamelCase : int = b""
for token in tokens:
if token in self.special_tokens_decoder:
__UpperCamelCase : Dict = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
__UpperCamelCase : List[Any] = self.added_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
__UpperCamelCase : List[Any] = token.encode("utf-8" )
elif token in self.added_tokens_encoder:
__UpperCamelCase : Optional[int] = token.encode("utf-8" )
else:
__UpperCamelCase : int = bytes([ord(a )] )
bstring += tok_string
__UpperCamelCase : List[str] = bstring.decode("utf-8" , errors="ignore" )
return string
def _lowerCamelCase ( self :List[Any] , a :str , a :Optional[str] = None ) -> Tuple[str]:
return ()
| 557 |
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> int:
'''simple docstring'''
__UpperCamelCase : Tuple = 1
for i in range(1 , num + 1):
fact *= i
return fact
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> int:
'''simple docstring'''
__UpperCamelCase : Dict = 0
while number > 0:
__UpperCamelCase : List[Any] = number % 10
sum_of_digits += last_digit
__UpperCamelCase : List[Any] = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 100) -> int:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = factorial(_lowerCamelCase)
__UpperCamelCase : Tuple = split_and_add(_lowerCamelCase)
return result
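# Worked example (illustrative): 10! == 3_628_800 and its digit sum is
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) evaluates to 27.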
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 557 | 1 |
"""simple docstring"""
import math
A: List[str] = 1_0
A: List[Any] = 7
A: Dict = BALLS_PER_COLOUR * NUM_COLOURS
def _snake_case ( UpperCamelCase : Any = 20 ):
UpperCAmelCase : Optional[Any] = math.comb(__A , __A )
UpperCAmelCase : Any = math.comb(NUM_BALLS - BALLS_PER_COLOUR , __A )
UpperCAmelCase : Optional[int] = NUM_COLOURS * (1 - missing_colour / total)
return F"{result:.9f}"
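# Worked example (illustrative), scaled down to 2 colours of 2 balls with 2
# drawn: total = C(4, 2) = 6, draws missing a given colour = C(2, 2) = 1, so
# the expected number of distinct colours is 2 * (1 - 1/6) = 5/3 ~= 1.667.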
if __name__ == "__main__":
print(solution(2_0))
| 715 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : List[Any] = TransfoXLTokenizer
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
UpperCAmelCase : Optional[Any] = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int = """<unk> UNwanted , running"""
UpperCAmelCase : Dict = """<unk> unwanted, running"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[str] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [0, 4, 8, 7] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Tuple = TransfoXLTokenizer(lower_case=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TransfoXLTokenizer(lower_case=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[Any] = TransfoXLTokenizer(lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
UpperCAmelCase : Optional[Any] = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[str] = self.get_tokenizer()
UpperCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
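# Illustrative note: move_added_token re-indexes an added token into the base
# vocabulary (id 1 here) instead of duplicating it, which is what the length
# check and the encode/decode round trip above verify.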
| 359 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _snake_case :
def __init__( self , a__ , a__=13 , a__=30 , a__=2 , a__=3 , a__=True , a__=True , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.0_2 , a__=3 , a__=0.6 , a__=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = mask_ratio
snake_case_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
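# Illustrative worked example: with image_size=30, patch_size=2 and
# mask_ratio=0.6 as above, num_patches = (30 // 2) ** 2 = 225 and the visible
# sequence length is ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.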
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> List[str]:
'''simple docstring'''
snake_case_ = ViTMAEModel(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = ViTMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
snake_case_ = (self.image_size // self.patch_size) ** 2
snake_case_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case_ = 1
snake_case_ = ViTMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(a__ )
snake_case_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : Any = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase_ : List[Any] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Any = False
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Dict = False
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = ViTMAEModelTester(self )
snake_case_ = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(a__ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> List[Any]:
'''simple docstring'''
np.random.seed(2 )
snake_case_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ = torch.from_numpy(a__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ = pt_noise
super().check_pt_tf_models(a__ , a__ , a__ )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(a__ )
model.to(a__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(a__ , a__ ) )
snake_case_ = outputs[0].cpu().numpy()
snake_case_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
snake_case_ = model_class.from_pretrained(a__ )
model.to(a__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(a__ , a__ ) )
# Make sure we don't have nans
snake_case_ = after_outputs[0].cpu().numpy()
snake_case_ = 0
snake_case_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ViTMAEModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
np.random.seed(2 )
snake_case_ = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(a__ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case_ = ViTMAEConfig()
snake_case_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
snake_case_ = model(**a__ , noise=torch.from_numpy(a__ ).to(device=a__ ) )
# verify the logits
snake_case_ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , a__ )
snake_case_ = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(a__ ) , atol=1e-4 ) )
| 400 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
_SCREAMING_SNAKE_CASE : List[str] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE : str = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : str = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : List[str] = ["input_ids", "attention_mask"]
lowerCAmelCase_ : Dict = BartTokenizer
def __init__( self , a__=None , a__=None , a__=None , a__="replace" , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=False , a__=True , **a__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
a__ , a__ , tokenizer_file=a__ , errors=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , add_prefix_space=a__ , trim_offsets=a__ , **a__ , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space:
snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**a__ )
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = "post_processor"
snake_case_ = getattr(self.backend_tokenizer , a__ , a__ )
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state["sep"] )
if "cls" in state:
snake_case_ = tuple(state["cls"] )
snake_case_ = False
if state.get("add_prefix_space" , a__ ) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get("trim_offsets" , a__ ) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(a__ , state.pop("type" ) )
snake_case_ = component_class(**a__ )
setattr(self.backend_tokenizer , a__ , a__ )
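# Illustrative note: the block above re-serializes the fast tokenizer's
# pre-tokenizer and post-processor so that the add_prefix_space / trim_offsets
# values passed to __init__ override whatever was baked into tokenizer.json.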
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else value
snake_case_ = value
def lowerCAmelCase__ ( self , *a__ , **a__ ) -> BatchEncoding:
'''simple docstring'''
snake_case_ = kwargs.get("is_split_into_words" , a__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*a__ , **a__ )
def lowerCAmelCase__ ( self , *a__ , **a__ ) -> BatchEncoding:
'''simple docstring'''
snake_case_ = kwargs.get("is_split_into_words" , a__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*a__ , **a__ )
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def lowerCAmelCase__ ( self , a__ , a__=None ) -> int:
'''simple docstring'''
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
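# Illustrative note: for a sequence pair (A, B) the layout built above is
# <s> A </s> </s> B </s>, and the token type ids are all zeros because BART
# does not use segment embeddings.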
| 400 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCAmelCase_ : Optional[int] = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
lowerCAmelCase_ : Optional[int] = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
lowerCAmelCase_ : Tuple = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : Dict = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
lowerCAmelCase_ : Tuple = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : Any = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
lowerCAmelCase_ : int = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : Union[str, Any] = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
lowerCAmelCase_ : List[str] = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : Optional[Any] = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
lowerCAmelCase_ : Optional[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : str = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
lowerCAmelCase_ : Tuple = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
lowerCAmelCase_ : List[str] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
lowerCAmelCase_ : str = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : Tuple = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
lowerCAmelCase_ : List[str] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
lowerCAmelCase_ : int = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
lowerCAmelCase_ : List[str] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : Tuple = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
lowerCAmelCase_ : Optional[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
lowerCAmelCase_ : Optional[Any] = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
lowerCAmelCase_ : str = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : int = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
lowerCAmelCase_ : str = ''
lowerCAmelCase_ : List[Any] = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
lowerCAmelCase_ : Optional[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase_ : str = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[str] ) -> List[str]:
assert ReadMe.from_string(lowercase , lowercase ).to_dict() == expected_dict
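# Illustrative note: ReadMe.from_string parses the YAML front matter plus the
# markdown heading tree against the structure spec above; to_dict() returns the
# parsed tree, while validate() raises with the accumulated error messages used
# in the EXPECTED_ERROR_* templates.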
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _lowerCamelCase ( readme_md : Any , expected_error : Optional[int] ) -> Any:
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        readme = ReadMe.from_string(readme_md , expected_error )
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( readme_md : Optional[Any] , expected_error : str ) -> List[str]:
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        ReadMe.from_string(readme_md , expected_error )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( readme_md : List[Any] ) -> Tuple:
    ReadMe.from_string(readme_md , readme_md , suppress_parsing_errors=True )
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _lowerCamelCase ( readme_md : List[str] , expected_dict : Union[str, Any] ) -> Union[str, Any]:
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , expected_dict ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _lowerCamelCase ( readme_md : Optional[int] , expected_error : List[Any] ) -> Tuple:
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            readme = ReadMe.from_readme(path , expected_error )
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( readme_md : List[str] , expected_error : List[Any] ) -> int:
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            ReadMe.from_readme(path , expected_error )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( readme_md : Union[str, Any] ) -> List[str]:
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , readme_md , suppress_parsing_errors=True )
| 521 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str , base_model : bool ) -> Any:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
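# Illustrative invocation of this conversion script (the script and file names below are
# placeholders, not taken from this repository):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt-0 \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin \
#       --base_model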
| 521 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
@slow
def lowercase ( self : Dict ):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
        features = {
"input_ids": tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
        output = model(features )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[0.0_6_8_1_7_6_2, 0.1_0_8_9_4_4_5_1, 0.0_6_7_7_2_5_0_4],
[-0.0_6_4_2_3_6_6_8, 0.0_2_3_6_6_6_1_5, 0.0_4_3_2_9_3_4_4],
[-0.0_6_0_5_7_2_9_5, 0.0_9_9_7_4_1_3_5, -0.0_0_0_7_0_5_8_4],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 236 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args ( unknown_args ) -> dict:
    '''simple docstring'''
    return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
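# Pairs consecutive tokens and strips leading dashes, e.g.
#   parse_unknown_args(["--num_proc", "8", "--seed", "42"]) -> {"num_proc": "8", "seed": "42"}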
def main ( ) -> None:
    '''simple docstring'''
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers" )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
service.run()
if __name__ == "__main__":
main()
| 236 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def __lowerCAmelCase ( files , tmp_path_factory ):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , ),
] , )
def __lowerCAmelCase ( dataset_info , tmp_path ):
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , "dataset_info.json" ) )
def __lowerCAmelCase ( ):
    dataset_info = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
assert dataset_info_yaml_dict == reloaded
def __lowerCAmelCase ( ):
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=4_2 ),
"v2": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def __lowerCAmelCase ( dataset_infos_dict , tmp_path ):
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , "README.md" ) )
| 206 |
_SCREAMING_SNAKE_CASE : dict[tuple[int, int, int], int] = {}
def _calculate ( days , absent , late ):
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution ( days = 3_0 ):
    return _calculate(days , absent=0 , late=0 )
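# Thanks to the cache each (days, absent, late) state is computed once; absent is at most 1
# and late at most 2 inside the recursion, so only O(days * 2 * 3) states exist instead of
# the O(3 ** days) branches of the naive recursion.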
if __name__ == "__main__":
print(solution())
| 206 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
UpperCamelCase = """encoder-decoder"""
UpperCamelCase = True
    def __init__( self , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def A__ ( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ):
        '''simple docstring'''
        logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
def A__ ( self :Tuple ):
'''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
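# Hypothetical usage sketch (this class mirrors transformers' EncoderDecoderConfig; the
# model types below are illustrative and not taken from this file):
#   encoder_cfg = AutoConfig.for_model("bert")
#   decoder_cfg = AutoConfig.for_model("bert", is_decoder=True, add_cross_attention=True)
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)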
| 21 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
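# _import_structure maps each submodule to the public names it defines; _LazyModule
# (installed at the bottom of this file) only imports a submodule the first time one of
# those names is accessed, which keeps the top-level package import cheap.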
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 317 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase__ ( TestCase ):
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_: int = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(A__ , exist_ok=A__ )
UpperCAmelCase_: Union[str, Any] = os.path.join(A__ , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_: Tuple = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(A__ , exist_ok=A__ )
    def get_tokenizer( self ):
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
    def tearDown( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def get_config( self ):
        """simple docstring"""
        config = RealmConfig(num_block_records=self.num_block_records )
return config
    def get_dummy_dataset( self ):
        """simple docstring"""
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
    def get_dummy_block_records( self ):
        """simple docstring"""
        block_records = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
            ] , dtype=object , )
return block_records
    def get_dummy_retriever( self ):
        """simple docstring"""
        retriever = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
    def test_retrieve( self ):
        """simple docstring"""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
    def test_block_has_answer( self ):
        """simple docstring"""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , _ = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
    def test_from_pretrained( self ):
        """simple docstring"""
        retriever = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" ) | 703 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_lowerCAmelCase = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
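# Mapping from GLUE task name to its number of classification labels; sts-b is a
# regression task, hence a single output.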
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch ( tf_checkpoint_path ,bert_config_file ,pytorch_dump_folder_path ,finetuning_task=None ) -> int:
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path ,WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path ,CONFIG_NAME )
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict() ,pytorch_weights_dump_path )
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path ,"w" ,encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
_lowerCAmelCase = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
) | 306 | 0 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search( possible_board : list[int] , diagonal_right_collisions : list[int] , diagonal_left_collisions : list[int] , boards : list[list[str]] , n : int , ):
    row = len(possible_board )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no queen in the same column, because that would be a
        # vertical collision. Then we apply the two formulas we learned before:
        #
        # 45°: y - x = b or 45°: row - col = b
        # 135°: y + x = b or 135°: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise there is no collision, so we call the dfs function again with the updated inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
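# For n = 4 depth_first_search yields the two classic solutions [1, 3, 0, 2] and
# [2, 0, 3, 1], where the value at index i is the queen's column in row i.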
def n_queens_solution( n : int ):
    boards = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
for board in boards:
for column in board:
            print(column )
print('' )
    print(len(boards ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4) | 91 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
    model_type = '''mobilenet_v1'''
    def __init__( self ,num_channels=3 ,image_size=224 ,depth_multiplier=1.0 ,min_depth=8 ,hidden_act="relu6" ,tf_padding=True ,classifier_dropout_prob=0.9_99 ,initializer_range=0.02 ,layer_norm_eps=0.0_01 ,**kwargs ,) -> Dict:
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
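    # depth_multiplier is MobileNet's width multiplier: it scales the channel count of
    # every layer, while min_depth is the floor that keeps very small multipliers from
    # shrinking a layer below that many channels.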
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float:
return 1e-4 | 91 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE ={
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE =[
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE =[
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717 |
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines ( lines ) -> str:
    """simple docstring"""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*" , "" , line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = "\n".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8" )
    return shaaaa(full_bytes ).hexdigest()
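# e.g. _hash_python_lines(["x = 1  # set x", "", "y = 2"]) hashes the string "x = 1  \ny = 2",
# so comments and blank lines do not change the cache fingerprint.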
# get importable module names and hash for caching
_SCREAMING_SNAKE_CASE = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_SCREAMING_SNAKE_CASE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_SCREAMING_SNAKE_CASE = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_SCREAMING_SNAKE_CASE = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 614 | 0 |
"""simple docstring"""
def least_divisible_repunit ( divisor ) ->int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
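# e.g. least_divisible_repunit(7) == 6 because R(6) = 111111 = 7 * 15873 is the shortest
# repunit divisible by 7 (the A(n) function of Project Euler problem 129).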
def solution ( limit = 100_0000 ) ->int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 434 | """simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
SCREAMING_SNAKE_CASE__ : Tuple =sys.version_info >= (3, 10)
def list_field ( default=None , metadata=None ) ->Optional[int]:
    return field(default_factory=lambda: default , metadata=metadata )
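# list_field exists because dataclasses reject mutable defaults such as `default=[]`;
# the default_factory closure gives every instance its own fresh list.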
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = 42
__snake_case = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = False
__snake_case = True
__snake_case = None
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """titi"""
__snake_case = """toto"""
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """titi"""
__snake_case = """toto"""
__snake_case = 42
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = "toto"
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : List[Any] = BasicEnum(self.foo )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = "toto"
def a__ ( self ) -> Dict:
_lowerCamelCase : Union[str, Any] = MixedTypeEnum(self.foo )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = None
__snake_case = field(default=a_ , metadata={"""help""": """help message"""} )
__snake_case = None
__snake_case = list_field(default=[] )
__snake_case = list_field(default=[] )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = list_field(default=[] )
__snake_case = list_field(default=[1, 2, 3] )
__snake_case = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
__snake_case = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = field()
__snake_case = field()
__snake_case = field()
def a__ ( self ) -> Dict:
_lowerCamelCase : str = BasicEnum(self.required_enum )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = 42
__snake_case = field()
__snake_case = None
__snake_case = field(default="""toto""" , metadata={"""help""": """help message"""} )
__snake_case = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = False
__snake_case = True
__snake_case = None
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = None
__snake_case = field(default=a_ , metadata={"""help""": """help message"""} )
__snake_case = None
__snake_case = list_field(default=[] )
__snake_case = list_field(default=[] )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def argparsersEqual( self , a , b ) -> Optional[Any]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , None ) and yy.get('''choices''' , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice ) , yy['''type'''](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : str = HfArgumentParser(_lowercase )
_lowerCamelCase : Dict = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--bar''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--baz''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--flag''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''' )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : str = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((_lowerCamelCase), ) : Union[str, Any] = parser.parse_args_into_dataclasses(_lowercase , look_for_args_file=_lowercase )
self.assertFalse(example.flag )
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Dict = HfArgumentParser(_lowercase )
_lowerCamelCase : List[str] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowercase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowercase , help='''help message''' )
self.argparsersEqual(_lowercase , _lowercase )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowercase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_lowercase , default=_lowercase )
_lowerCamelCase : Optional[Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowercase )
for dataclass_type in dataclass_types:
_lowerCamelCase : Optional[Any] = HfArgumentParser(_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : List[Any] = parser.parse_args([] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
_lowerCamelCase : Dict = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
_lowerCamelCase : int = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
_lowerCamelCase : Any = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
_lowerCamelCase : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : Union[str, Any] = HfArgumentParser(_lowercase )
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : Dict = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_lowerCamelCase : int = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_lowerCamelCase : Optional[Any] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_lowerCamelCase : Any = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_lowerCamelCase : str = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
_lowerCamelCase : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def a__ ( self ) -> Dict:
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = "toto"
_lowerCamelCase : Dict = HfArgumentParser(_lowercase )
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : str = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_lowerCamelCase : Optional[int] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_lowerCamelCase : Optional[int] = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : int = HfArgumentParser(_lowercase )
_lowerCamelCase : Any = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowercase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowercase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowercase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : int = parser.parse_args([] )
self.assertEqual(
_lowercase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
_lowerCamelCase : List[Any] = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_lowercase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_lowercase , type=_lowercase )
expected.add_argument('''--bar''' , default=_lowercase , type=_lowercase , help='''help message''' )
expected.add_argument('''--baz''' , default=_lowercase , type=_lowercase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowercase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowercase )
_lowerCamelCase : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowercase )
for dataclass_type in dataclass_types:
_lowerCamelCase : Union[str, Any] = HfArgumentParser(_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : List[str] = parser.parse_args([] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , bar=_lowercase , baz=_lowercase , ces=[] , des=[] ) )
_lowerCamelCase : List[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_lowercase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def a__ ( self ) -> Any:
_lowerCamelCase : str = HfArgumentParser(_lowercase )
_lowerCamelCase : List[str] = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--required_str''' , type=_lowercase , required=_lowercase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowercase , )
self.argparsersEqual(_lowercase , _lowercase )
def a__ ( self ) -> Dict:
_lowerCamelCase : Tuple = HfArgumentParser(_lowercase )
_lowerCamelCase : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , required=_lowercase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowercase , )
expected.add_argument('''--opt''' , type=_lowercase , default=_lowercase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowercase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
    def test_parse_dict( self ) -> Union[str, Any]:
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        expected_args = BasicExample(**args_dict )
        self.assertEqual(parsed_args , expected_args )
    def test_parse_dict_extra_key( self ) -> Optional[int]:
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )
    def test_parse_json( self ) -> Union[str, Any]:
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_json''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(args_dict , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
            expected_args = BasicExample(**args_dict )
            self.assertEqual(parsed_args , expected_args )
    def test_parse_yaml( self ) -> Tuple:
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_yaml''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(args_dict , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
            expected_args = BasicExample(**args_dict )
            self.assertEqual(parsed_args , expected_args )
    def test_integration_training_args( self ) -> str:
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
| 434 | 1 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape ( input_array ):
    """simple docstring"""
    return input_array.reshape((input_array.size, 1) )
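# e.g. column_reshape(np.array([1, 2, 3])) -> array([[1], [2], [3]])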
def covariance_within_classes ( features , labels , classes ):
    """simple docstring"""
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes ( features , labels , classes ):
    """simple docstring"""
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis ( features , dimensions ):
    """simple docstring"""
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _ , eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , centered_data )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis ( features , labels , classes , dimensions ):
    """simple docstring"""
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _ , eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
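# scipy's eigh(a, b) solves the generalized eigenproblem S_b v = lambda * S_w v, so the
# leading eigenvectors maximise between-class scatter relative to within-class scatter
# (Fisher's criterion); the SVD step then orthonormalises the kept directions.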
def test_linear_discriminant_analysis ( ):
    """simple docstring"""
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis ( ):
    """simple docstring"""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 279 | 0 |