| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 39 |
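# Illustrative companion to the snippet above (not part of the original row): the value it
# computes is the central binomial coefficient C(2n, n), i.e. the number of monotone lattice
# paths through an n x n grid, so math.comb gives the same result without the float
# division (assumes Python 3.8+).
from math import comb


def lattice_paths(n: int = 20) -> int:
    return comb(2 * n, n)


assert lattice_paths(2) == 6
assert lattice_paths(20) == 137846528820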
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=2048 , _UpperCAmelCase=128 , _UpperCAmelCase=1 , _UpperCAmelCase=512 , _UpperCAmelCase=30 , _UpperCAmelCase=44100 , ):
__a : Any = parent
__a : Tuple = batch_size
__a : Tuple = min_seq_length
__a : List[str] = max_seq_length
__a : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a : Tuple = spectrogram_length
__a : int = feature_size
__a : int = num_audio_channels
__a : Tuple = hop_length
__a : List[Any] = chunk_length
__a : Any = sampling_rate
def _lowerCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Optional[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TvltFeatureExtractor
def _lowerCamelCase ( self ):
__a : Optional[Any] = TvltFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : int = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
__a : List[str] = self._load_datasamples(1 )
__a : Tuple = TvltFeatureExtractor()
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
__a : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) ) | 52 | 0 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : List[Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCamelCase : Dict = concatenate_datasets
__lowerCamelCase : Optional[int] = DownloadConfig
__lowerCamelCase : Optional[int] = DownloadManager
__lowerCamelCase : int = DownloadMode
__lowerCamelCase : int = DownloadConfig
__lowerCamelCase : Any = DownloadMode
__lowerCamelCase : Optional[Any] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 418 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class lowerCAmelCase__ :
def __init__( self : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]=14 , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=99 , UpperCamelCase_ : List[Any]=32 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Optional[Any]=37 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Tuple=512 , UpperCamelCase_ : Tuple=0.02 , ) -> int:
"""simple docstring"""
lowerCamelCase_ : List[Any] = parent
lowerCamelCase_ : str = batch_size
lowerCamelCase_ : List[Any] = seq_length
lowerCamelCase_ : str = is_training
lowerCamelCase_ : Optional[Any] = use_input_mask
lowerCamelCase_ : Dict = use_token_type_ids
lowerCamelCase_ : Union[str, Any] = use_labels
lowerCamelCase_ : Optional[int] = vocab_size
lowerCamelCase_ : str = hidden_size
lowerCamelCase_ : int = rotary_dim
lowerCamelCase_ : List[str] = num_hidden_layers
lowerCamelCase_ : List[Any] = num_attention_heads
lowerCamelCase_ : Dict = intermediate_size
lowerCamelCase_ : Optional[Any] = hidden_act
lowerCamelCase_ : List[Any] = hidden_dropout_prob
lowerCamelCase_ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase_ : Union[str, Any] = max_position_embeddings
lowerCamelCase_ : Optional[int] = initializer_range
lowerCamelCase_ : List[Any] = None
lowerCamelCase_ : Optional[int] = vocab_size - 1
lowerCamelCase_ : int = vocab_size - 1
lowerCamelCase_ : str = vocab_size - 1
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Dict = None
if self.use_input_mask:
lowerCamelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : int = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict = config_and_inputs
lowerCamelCase_ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __UpperCamelCase ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = 20
lowerCamelCase_ : Optional[Any] = model_class_name(UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = model.init_cache(input_ids.shape[0] , UpperCamelCase_ )
lowerCamelCase_ : str = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCamelCase_ : int = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase_ : Union[str, Any] = model(
input_ids[:, :-1] , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , position_ids=UpperCamelCase_ , )
lowerCamelCase_ : Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCamelCase_ : Optional[Any] = model(
input_ids[:, -1:] , attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCamelCase_ , )
lowerCamelCase_ : int = model(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
def __UpperCamelCase ( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : str = 20
lowerCamelCase_ : int = model_class_name(UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCamelCase_ : Any = model.init_cache(input_ids.shape[0] , UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase_ : Any = model(
input_ids[:, :-1] , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , position_ids=UpperCamelCase_ , )
lowerCamelCase_ : Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCamelCase_ : List[str] = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCamelCase_ , position_ids=UpperCamelCase_ , )
lowerCamelCase_ : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase__ ( _lowerCAmelCase ,_lowerCAmelCase ,unittest.TestCase ):
A = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : int = FlaxGPTJModelTester(self )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@tooslow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Any = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
lowerCamelCase_ : int = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
lowerCamelCase_ : Union[str, Any] = False
lowerCamelCase_ : str = model.config.eos_token_id
lowerCamelCase_ : Any = jax.jit(model.generate )
lowerCamelCase_ : Union[str, Any] = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCamelCase_ : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@is_pt_flax_cross_test
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase_ : Union[str, Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : str = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase_ : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase_ : Tuple = getattr(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ , lowerCamelCase_ : Any = pt_inputs['''input_ids'''].shape
lowerCamelCase_ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase_ ):
lowerCamelCase_ : str = 0
lowerCamelCase_ : Dict = 1
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : Union[str, Any] = pt_model_class(UpperCamelCase_ ).eval()
lowerCamelCase_ : int = model_class(UpperCamelCase_ , dtype=jnp.floataa )
lowerCamelCase_ : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = fx_state
with torch.no_grad():
lowerCamelCase_ : Optional[int] = pt_model(**UpperCamelCase_ ).to_tuple()
lowerCamelCase_ : List[Any] = fx_model(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = model_class.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ )
lowerCamelCase_ : Dict = fx_model_loaded(**UpperCamelCase_ ).to_tuple()
self.assertEqual(
len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase_ : Optional[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase_ : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase_ : Optional[int] = getattr(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = pt_model_class(UpperCamelCase_ ).eval()
lowerCamelCase_ : str = model_class(UpperCamelCase_ , dtype=jnp.floataa )
lowerCamelCase_ : Tuple = load_flax_weights_in_pytorch_model(UpperCamelCase_ , fx_model.params )
lowerCamelCase_ , lowerCamelCase_ : List[Any] = pt_inputs['''input_ids'''].shape
lowerCamelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase_ ):
lowerCamelCase_ : Dict = 0
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : str = 0
lowerCamelCase_ : List[str] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCamelCase_ : List[Any] = pt_model(**UpperCamelCase_ ).to_tuple()
lowerCamelCase_ : str = fx_model(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = pt_model_class.from_pretrained(UpperCamelCase_ , from_flax=UpperCamelCase_ )
with torch.no_grad():
lowerCamelCase_ : Tuple = pt_model_loaded(**UpperCamelCase_ ).to_tuple()
self.assertEqual(
len(UpperCamelCase_ ) , len(UpperCamelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ : Optional[Any] = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
lowerCamelCase_ : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
| 418 | 1 |
from math import isclose, sqrt


def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 14 |
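# Illustrative check (not part of the original row): the reflection step above relies on the
# tangent double-angle identities. If the normal has gradient n = tan(t), then
# s = 2n/(1+n^2) = sin(2t) and c = (1-n^2)/(1+n^2) = cos(2t), and a beam with incoming
# gradient m leaves with gradient (s - c*m)/(c + s*m) = tan(2t - atan(m)). A small
# self-contained numerical confirmation of that identity:
from math import atan, isclose, tan

n, m = 0.75, -2.0  # arbitrary normal gradient and incoming gradient
s = 2 * n / (1 + n * n)
c = (1 - n * n) / (1 + n * n)
reflected = (s - c * m) / (c + s * m)
assert isclose(reflected, tan(2 * atan(n) - atan(m)))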
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings: ['1', '1/2', ..., '1/n']."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 14 | 1 |
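# Illustrative companion to the harmonic-series helper above (not part of the original row):
# the helper only formats the terms as strings; the exact numeric partial sum
# H_n = 1 + 1/2 + ... + 1/n can be obtained with fractions.Fraction.
from fractions import Fraction


def harmonic_sum(n: int) -> Fraction:
    return sum(Fraction(1, k) for k in range(1, n + 1))


assert harmonic_sum(4) == Fraction(25, 12)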
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['MaskFormerFeatureExtractor']
UpperCAmelCase_ = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
UpperCAmelCase_ = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 712 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 0 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
A : Optional[Any] = logging.get_logger(__name__)
A : Optional[int] = "Hello world! cécé herlolip"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = FairseqRobertaModel.from_pretrained(_UpperCamelCase )
roberta.eval() # disable dropout
__lowerCAmelCase = roberta.model.encoder.sentence_encoder
__lowerCAmelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
__lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print("Our RoBERTa config:" , _UpperCamelCase )
__lowerCAmelCase = XLMRobertaXLForSequenceClassification(_UpperCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(_UpperCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
__lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
__lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
__lowerCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCAmelCase = model.roberta.encoder.layer[i]
__lowerCAmelCase = roberta_sent_encoder.layers[i]
__lowerCAmelCase = layer.attention
__lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
__lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
__lowerCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
__lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
__lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
__lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
__lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
__lowerCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
__lowerCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
__lowerCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__lowerCAmelCase = roberta_layer.final_layer_norm.weight
__lowerCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
__lowerCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__lowerCAmelCase = roberta_layer.fca.weight
__lowerCAmelCase = roberta_layer.fca.bias
# output
__lowerCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__lowerCAmelCase = roberta_layer.fca.weight
__lowerCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
__lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
__lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
__lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
__lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
__lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
__lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
__lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
__lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
__lowerCAmelCase = roberta.model.encoder.lm_head.weight
__lowerCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowerCAmelCase = roberta.encode(_UpperCamelCase ).unsqueeze(0 ) # batch of size 1
__lowerCAmelCase = model(_UpperCamelCase )[0]
if classification_head:
__lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_UpperCamelCase ) )
else:
__lowerCAmelCase = roberta.model(_UpperCamelCase )[0]
print(our_output.shape , their_output.shape )
__lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__lowerCAmelCase = torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(_UpperCamelCase ).mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
A : Optional[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 636 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Union[str, Any] = seq_length
lowerCAmelCase__ : str = is_training
lowerCAmelCase__ : Union[str, Any] = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : List[str] = embedding_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Optional[Any] = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Optional[Any] = num_labels
lowerCAmelCase__ : List[str] = num_choices
lowerCAmelCase__ : Any = scope
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : str = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_labels
lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Optional[int] = config_and_inputs
lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
A__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
# test_resize_embeddings = False
A__ = False
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def __magic_name__( self ):
lowerCAmelCase__ : str = MegatronBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __magic_name__( self ):
self.config_tester.run_common_tests()
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase )
def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]:
return torch.tensor(
UpperCamelCase , dtype=torch.long , device=UpperCamelCase , )
lowerCAmelCase_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase )
lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.half()
lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0]
lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj]
lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj]
lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
| 678 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class a ( lowerCAmelCase_ ):
_snake_case : str = 'visual_bert'
def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : Dict=768 , __lowerCAmelCase : Optional[Any]=512 , __lowerCAmelCase : List[Any]=12 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Tuple=3072 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : Union[str, Any]=1e-1_2 , __lowerCAmelCase : int=False , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : List[Any]=0 , __lowerCAmelCase : Any=2 , **__lowerCAmelCase : str , ):
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = visual_embedding_dim
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = bypass_transformer
_UpperCAmelCase = special_visual_initialize
| 275 |
"""simple docstring"""
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of encode: map 1 ... 26 back to a ... z."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
| 275 | 1 |
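# Illustrative round trip for the a=1 ... z=26 cipher above (not part of the original row;
# assumes the helpers are named encode/decode, as they are called in main()):
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode(encode("hello")) == "hello"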
from __future__ import annotations
class snake_case :
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase_ : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = order
# a_{0} ... a_{k}
SCREAMING_SNAKE_CASE_ = [1.0] + [0.0] * order
# b_{0} ... b_{k}
SCREAMING_SNAKE_CASE_ = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
SCREAMING_SNAKE_CASE_ = [0.0] * self.order
# y[n-1] ... y[n-k]
SCREAMING_SNAKE_CASE_ = [0.0] * self.order
def _lowercase ( self : Optional[int] , lowerCAmelCase_ : list[float] , lowerCAmelCase_ : list[float] ) -> None:
"""simple docstring"""
if len(UpperCAmelCase_ ) < self.order:
SCREAMING_SNAKE_CASE_ = [1.0, *a_coeffs]
if len(UpperCAmelCase_ ) != self.order + 1:
SCREAMING_SNAKE_CASE_ = (
F'''Expected a_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(UpperCAmelCase_ )}'''
)
raise ValueError(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) != self.order + 1:
SCREAMING_SNAKE_CASE_ = (
F'''Expected b_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(UpperCAmelCase_ )}'''
)
raise ValueError(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ = a_coeffs
SCREAMING_SNAKE_CASE_ = b_coeffs
def _lowercase ( self : str , lowerCAmelCase_ : float ) -> float:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
SCREAMING_SNAKE_CASE_ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
SCREAMING_SNAKE_CASE_ = self.input_history[:-1]
SCREAMING_SNAKE_CASE_ = self.output_history[:-1]
SCREAMING_SNAKE_CASE_ = sample
SCREAMING_SNAKE_CASE_ = result
return result
| 393 |
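# Illustrative stand-alone sketch of the difference equation the filter class above
# implements (not part of the original row): y[n] = (b0*x[n] + sum_i b_i*x[n-i]
# - sum_i a_i*y[n-i]) / a0, applied sample by sample.
def iir_apply(samples: list, a_coeffs: list, b_coeffs: list) -> list:
    order = len(a_coeffs) - 1
    x_hist = [0.0] * order  # x[n-1] ... x[n-k]
    y_hist = [0.0] * order  # y[n-1] ... y[n-k]
    out = []
    for x in samples:
        acc = 0.0
        for i in range(1, order + 1):
            acc += b_coeffs[i] * x_hist[i - 1] - a_coeffs[i] * y_hist[i - 1]
        y = (acc + b_coeffs[0] * x) / a_coeffs[0]
        x_hist = [x] + x_hist[:-1]
        y_hist = [y] + y_hist[:-1]
        out.append(y)
    return out


# A trivial pass-through filter (a = b = [1.0, 0.0]) leaves the signal unchanged:
assert iir_apply([1.0, 0.5, -0.25], [1.0, 0.0], [1.0, 0.0]) == [1.0, 0.5, -0.25]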
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__A = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Optional[int] , **UpperCAmelCase_ : List[Any]) ->List[str]:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
requires_backends(self , "vision")
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        '''simple docstring'''
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        '''simple docstring'''
        image = load_image(image)
        inputs = self.image_processor(images=[image] , return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0] , UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores , list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels) , key=lambda x: -x[0])
        ]
        return result
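# Hedged usage sketch (added; assumes the `openai/clip-vit-base-patch32` checkpoint and a
# local image file are available):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cats.png", candidate_labels=["cat", "dog", "remote control"])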
| 59 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def get_user_input():
    '''simple docstring'''
    compute_environment = _ask_options(
        "In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("config" , description=description )
    else:
        parser = argparse.ArgumentParser("Accelerate config command" , description=description )
    parser.add_argument(
        "--config_file" , default=None , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def config_command(args):
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(UpperCamelCase_ )
else:
config.to_yaml_file(UpperCamelCase_ )
print(f'''accelerate configuration saved at {config_file}''' )
def main():
    '''simple docstring'''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
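# Illustrative note (added): when the `accelerate` CLI is installed, running `accelerate config`
# walks through these prompts and saves the answers to the default YAML location.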
| 612 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase ( UpperCamelCase_: ndarray ) -> float:
'''simple docstring'''
return np.dot(UpperCamelCase_ , UpperCamelCase_ )
class lowercase_ :
def __init__( self , *,
a_ = np.inf , a_ = "linear" , a_ = 0.0 , ) ->None:
'''simple docstring'''
_a = regularization
_a = gamma
if kernel == "linear":
_a = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
_a = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
_a = f'''Unknown kernel: {kernel}'''
raise ValueError(a_ )
def lowerCamelCase__ ( self , a_ , a_ ) ->float:
'''simple docstring'''
return np.dot(a_ , a_ )
def lowerCamelCase__ ( self , a_ , a_ ) ->float:
'''simple docstring'''
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def lowerCamelCase__ ( self , a_ , a_ ) ->None:
'''simple docstring'''
_a = observations
_a = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((_a) , ) = np.shape(a_ )
def to_minimize(a_ ) -> float:
_a = 0
((_a) , ) = np.shape(a_ )
for i in range(a_ ):
for j in range(a_ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(a_ )
_a = LinearConstraint(a_ , 0 , 0 )
_a = Bounds(0 , self.regularization )
_a = minimize(
a_ , np.ones(a_ ) , bounds=a_ , constraints=[ly_contraint] ).x
_a = l_star
# calculating mean offset of separation plane to points
_a = 0
for i in range(a_ ):
for j in range(a_ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
_a = s / n
def lowerCamelCase__ ( self , a_ ) ->int:
'''simple docstring'''
_a = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , a_ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 612 | 1 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
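# Hedged spot checks (added): get_week_day(2023, 1, 1) is expected to return "Sunday",
# and get_week_day(2000, 1, 1) "Saturday", which exercises the leap-century branch.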
if __name__ == "__main__":
import doctest
doctest.testmod()
| 478 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ :Optional[int] = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Any = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 478 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True )
| 718 | import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 2_56,
}
CONTROL_CODES = {
'Pregnancy': 16_86_29,
'Christianity': 76_75,
'Explain': 10_64_23,
'Fitness': 6_34_40,
'Saving': 6_31_63,
'Ask': 2_71_71,
'Ass': 9_59_85,
'Joke': 16_35_09,
'Questions': 4_56_22,
'Thoughts': 4_96_05,
'Retail': 5_23_42,
'Feminism': 16_43_38,
'Writing': 1_19_92,
'Atheism': 19_22_63,
'Netflix': 4_86_16,
'Computing': 3_96_39,
'Opinion': 4_32_13,
'Alone': 4_49_67,
'Funny': 5_89_17,
'Gaming': 4_03_58,
'Human': 40_88,
'India': 13_31,
'Joker': 7_71_38,
'Diet': 3_62_06,
'Legal': 1_18_59,
'Norman': 49_39,
'Tip': 7_26_89,
'Weight': 5_23_43,
'Movies': 4_62_73,
'Running': 2_34_25,
'Science': 20_90,
'Horror': 3_77_93,
'Confession': 6_05_72,
'Finance': 1_22_50,
'Politics': 1_63_60,
'Scary': 19_19_85,
'Support': 1_26_54,
'Technologies': 3_25_16,
'Teenage': 6_61_60,
'Event': 3_27_69,
'Learned': 6_74_60,
'Notion': 18_27_70,
'Wikipedia': 3_75_83,
'Books': 66_65,
'Extract': 7_60_50,
'Confessions': 10_27_01,
'Conspiracy': 7_59_32,
'Links': 6_36_74,
'Narcissus': 15_04_25,
'Relationship': 5_47_66,
'Relationships': 13_47_96,
'Reviews': 4_16_71,
'News': 42_56,
'Translation': 2_68_20,
'multilingual': 12_84_06,
}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
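# Illustrative check (added): get_pairs(("h", "e", "l", "l", "o")) is expected to yield
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.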
class CTRLTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs) -> int:
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file , encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges))))
        self.cache = {}
@property
def _lowerCamelCase ( self) -> Optional[Any]:
return len(self.encoder)
def _lowerCamelCase ( self) -> int:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
if token in self.cache:
return self.cache[token]
_A : int = tuple(__lowerCamelCase)
_A : Optional[Any] = tuple(list(word[:-1]) + [word[-1] + "</w>"])
_A : str = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[Any] = bigram
_A : str = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : int = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : Optional[Any] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : Tuple = get_pairs(__lowerCamelCase)
_A : Any = "@@ ".join(__lowerCamelCase)
_A : Dict = word[:-4]
_A : str = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
_A : Tuple = []
_A : str = re.findall(r"\S+\n?" , __lowerCamelCase)
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
return self.decoder.get(__lowerCamelCase , self.unk_token)
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
_A : List[str] = " ".join(__lowerCamelCase).replace("@@ " , "").strip()
return out_string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Optional[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Optional[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : List[str] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : List[Any] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 503 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE = ["image_embeds", "negative_image_embeds", "image", "hint"]
__SCREAMING_SNAKE_CASE = ["image_embeds", "negative_image_embeds", "image", "hint"]
__SCREAMING_SNAKE_CASE = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__SCREAMING_SNAKE_CASE = False
@property
def _lowerCamelCase ( self) -> Tuple:
return 3_2
@property
def _lowerCamelCase ( self) -> Dict:
return 3_2
@property
def _lowerCamelCase ( self) -> str:
return self.time_input_dim
@property
def _lowerCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self) -> str:
return 1_0_0
@property
def _lowerCamelCase ( self) -> Tuple:
torch.manual_seed(0)
_A : str = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_A : str = UNetaDConditionModel(**__lowerCamelCase)
return model
@property
def _lowerCamelCase ( self) -> str:
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowerCamelCase ( self) -> List[Any]:
torch.manual_seed(0)
_A : int = VQModel(**self.dummy_movq_kwargs)
return model
def _lowerCamelCase ( self) -> Any:
_A : List[str] = self.dummy_unet
_A : Optional[int] = self.dummy_movq
_A : str = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_A : int = DDIMScheduler(**__lowerCamelCase)
_A : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase=0) -> Any:
_A : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase)).to(__lowerCamelCase)
_A : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
__lowerCamelCase)
# create init_image
_A : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__lowerCamelCase)).to(__lowerCamelCase)
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1)[0]
_A : Union[str, Any] = Image.fromarray(np.uinta(__lowerCamelCase)).convert("RGB").resize((2_5_6, 2_5_6))
# create hint
_A : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__lowerCamelCase)).to(__lowerCamelCase)
if str(__lowerCamelCase).startswith("mps"):
_A : Dict = torch.manual_seed(__lowerCamelCase)
else:
_A : Tuple = torch.Generator(device=__lowerCamelCase).manual_seed(__lowerCamelCase)
_A : Tuple = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "cpu"
_A : List[Any] = self.get_dummy_components()
_A : Any = self.pipeline_class(**__lowerCamelCase)
_A : Any = pipe.to(__lowerCamelCase)
pipe.set_progress_bar_config(disable=__lowerCamelCase)
_A : str = pipe(**self.get_dummy_inputs(__lowerCamelCase))
_A : int = output.images
_A : Dict = pipe(
**self.get_dummy_inputs(__lowerCamelCase) , return_dict=__lowerCamelCase , )[0]
_A : Any = image[0, -3:, -3:, -1]
_A : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_A : List[str] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self) -> Tuple:
_A : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
_A : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
_A : str = init_image.resize((5_1_2, 5_1_2))
_A : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
_A : Optional[Any] = torch.from_numpy(np.array(__lowerCamelCase)).float() / 2_5_5.0
_A : Optional[Any] = hint.permute(2 , 0 , 1).unsqueeze(0)
_A : List[str] = "A robot, 4k photo"
_A : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(__lowerCamelCase)
_A : List[str] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
_A : Dict = pipeline.to(__lowerCamelCase)
pipeline.set_progress_bar_config(disable=__lowerCamelCase)
_A : Tuple = torch.Generator(device="cpu").manual_seed(0)
_A , _A : List[str] = pipe_prior(
__lowerCamelCase , image=__lowerCamelCase , strength=0.8_5 , generator=__lowerCamelCase , negative_prompt="" , ).to_tuple()
_A : Optional[int] = pipeline(
image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , hint=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="np" , )
_A : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase)
| 503 | 1 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """simple docstring"""
    assert (
        isinstance(number_of_steps, int ) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current, previous = current + previous, current
    return current
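# Quick illustrative check (added): with 3 steps the distinct climbs are 1+1+1, 1+2 and 2+1,
# so climb_stairs(3) is expected to return 3.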
if __name__ == "__main__":
import doctest
doctest.testmod() | 704 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """simple docstring"""
    prefix = '''backbone.''' if is_semantic else ''''''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', '''beit.embeddings.cls_token'''),
(f'{prefix}patch_embed.proj.weight', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'{prefix}patch_embed.proj.bias', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'{prefix}pos_embed', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Any=False, _lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
_a = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
_a = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
_a = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
_a = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
_a = in_proj_weight[
: config.hidden_size, :
]
_a = q_bias
_a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a = in_proj_weight[
-config.hidden_size :, :
]
_a = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
_a = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
_a = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
_a = gamma_a
_a = gamma_a
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val


def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_a = False if '''rvlcdip''' in checkpoint_url else True
_a = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase, use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
_a = 10_24
_a = 40_96
_a = 24
_a = 16
# labels
if "rvlcdip" in checkpoint_url:
_a = 16
_a = '''huggingface/label-files'''
_a = '''rvlcdip-id2label.json'''
_a = json.load(open(hf_hub_download(_lowerCAmelCase, _lowerCAmelCase, repo_type='''dataset''' ), '''r''' ) )
_a = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
_a = torch.hub.load_state_dict_from_url(_lowerCAmelCase, map_location='''cpu''' )['''model''']
_a = create_rename_keys(_lowerCAmelCase, has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase, _lowerCAmelCase, has_lm_head=_lowerCAmelCase )
# load HuggingFace model
_a = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
_a = BeitImageProcessor(
size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=_lowerCAmelCase )
_a = prepare_img()
_a = image_processor(images=_lowerCAmelCase, return_tensors='''pt''' )
_a = encoding['''pixel_values''']
_a = model(_lowerCAmelCase )
_a = outputs.logits
# verify logits
_a = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
_a = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
_a = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase, _lowerCAmelCase ), organization='''nielsr''', commit_message='''Add image processor''', use_temp_dir=_lowerCAmelCase, )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase, _lowerCAmelCase ), organization='''nielsr''', commit_message='''Add model''', use_temp_dir=_lowerCAmelCase, )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
__snake_case = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 285 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Tuple ="detr"
UpperCAmelCase_ : Union[str, Any] =["past_key_values"]
UpperCAmelCase_ : List[Any] ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=3 , UpperCAmelCase=100 , UpperCAmelCase=6 , UpperCAmelCase=2048 , UpperCAmelCase=8 , UpperCAmelCase=6 , UpperCAmelCase=2048 , UpperCAmelCase=8 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=256 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1.0 , UpperCAmelCase=False , UpperCAmelCase="sine" , UpperCAmelCase="resnet50" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__snake_case : Optional[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
__snake_case : Optional[int] = backbone_config.get("model_type" )
__snake_case : Any = CONFIG_MAPPING[backbone_model_type]
__snake_case : Tuple = config_class.from_dict(UpperCAmelCase )
# set timm attributes to None
__snake_case , __snake_case , __snake_case : Tuple = None, None, None
__snake_case : str = use_timm_backbone
__snake_case : Union[str, Any] = backbone_config
__snake_case : Tuple = num_channels
__snake_case : Optional[Any] = num_queries
__snake_case : Tuple = d_model
__snake_case : str = encoder_ffn_dim
__snake_case : Dict = encoder_layers
__snake_case : Optional[Any] = encoder_attention_heads
__snake_case : str = decoder_ffn_dim
__snake_case : Any = decoder_layers
__snake_case : Optional[Any] = decoder_attention_heads
__snake_case : Any = dropout
__snake_case : Optional[int] = attention_dropout
__snake_case : Any = activation_dropout
__snake_case : Tuple = activation_function
__snake_case : Optional[int] = init_std
__snake_case : str = init_xavier_std
__snake_case : List[Any] = encoder_layerdrop
__snake_case : List[Any] = decoder_layerdrop
__snake_case : Dict = encoder_layers
__snake_case : Tuple = auxiliary_loss
__snake_case : str = position_embedding_type
__snake_case : Tuple = backbone
__snake_case : Union[str, Any] = use_pretrained_backbone
__snake_case : List[str] = dilation
# Hungarian matcher
__snake_case : List[str] = class_cost
__snake_case : int = bbox_cost
__snake_case : Union[str, Any] = giou_cost
# Loss coefficients
__snake_case : str = mask_loss_coefficient
__snake_case : Optional[int] = dice_loss_coefficient
__snake_case : Union[str, Any] = bbox_loss_coefficient
__snake_case : List[str] = giou_loss_coefficient
__snake_case : str = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return cls(backbone_config=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self ) -> Dict[str, any]:
'''simple docstring'''
__snake_case : List[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__snake_case : Union[str, Any] = self.backbone_config.to_dict()
__snake_case : Dict = self.__class__.model_type
return output
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : str =version.parse("1.11" )
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return 12
| 243 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor( ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer) -> Tuple:
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        sampling_rate = kwargs.pop("sampling_rate" , None )
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
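# Hedged usage sketch (added; the checkpoint name and sampling rate below are assumptions,
# shown only for illustration):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform, sampling_rate=48000, return_tensors="pt")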
| 243 | 1 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowerCamelCase_ : int = logging.get_logger(__name__)
def __magic_name__( _A , _A ):
'''simple docstring'''
def run_func(_A ):
@wraps(lowerCAmelCase__ )
def run_in_eager_mode(*_A , **_A ):
return func(*lowerCAmelCase__ , **lowerCAmelCase__ )
@wraps(lowerCAmelCase__ )
@tf.function(experimental_compile=lowerCAmelCase__ )
def run_in_graph_mode(*_A , **_A ):
return func(*lowerCAmelCase__ , **lowerCAmelCase__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __magic_name__( _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ = random.Random()
UpperCamelCase__ = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _SCREAMING_SNAKE_CASE ( __A ):
'''simple docstring'''
__a : TensorFlowBenchmarkArguments
__a : PretrainedConfig
__a : str = "TensorFlow"
@property
def A ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return tf.__version__
def A ( self : Tuple , lowercase : List[str] , lowercase : Optional[int] , lowercase : Any ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCamelCase__ = self._prepare_inference_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self._measure_speed(_inference )
def A ( self : Optional[Any] , lowercase : int , lowercase : Optional[Any] , lowercase : int ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCamelCase__ = self._prepare_train_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self._measure_speed(_train )
def A ( self : Dict , lowercase : Tuple , lowercase : Any , lowercase : Tuple ) -> str:
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase__ )
UpperCamelCase__ = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCamelCase__ = self._prepare_inference_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self._measure_memory(_inference )
def A ( self : Optional[int] , lowercase : str , lowercase : Dict , lowercase : Any ) -> Union[str, Any]:
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase__ )
UpperCamelCase__ = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCamelCase__ = self._prepare_train_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self._measure_memory(_train )
def A ( self : List[str] , lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
UpperCamelCase__ = (
hasattr(UpperCamelCase__ , """architectures""" )
and isinstance(config.architectures , UpperCamelCase__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase__ = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase__ = __import__("""transformers""" , fromlist=[model_class] )
UpperCamelCase__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase__ = model_cls(UpperCamelCase__ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
UpperCamelCase__ = TF_MODEL_MAPPING[config.__class__](UpperCamelCase__ )
# encoder-decoder has vocab size saved differently
UpperCamelCase__ = config.vocab_size if hasattr(UpperCamelCase__ , """vocab_size""" ) else config.encoder.vocab_size
UpperCamelCase__ = random_input_ids(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , training=UpperCamelCase__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(UpperCamelCase__ , training=UpperCamelCase__ )
UpperCamelCase__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A ( self : Tuple , lowercase : Tuple , lowercase : str , lowercase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
UpperCamelCase__ = (
hasattr(UpperCamelCase__ , """architectures""" )
and isinstance(config.architectures , UpperCamelCase__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase__ = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase__ = __import__("""transformers""" , fromlist=[model_class] )
UpperCamelCase__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase__ = model_cls(UpperCamelCase__ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
UpperCamelCase__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase__ )
# encoder-decoder has vocab size saved differently
UpperCamelCase__ = config.vocab_size if hasattr(UpperCamelCase__ , """vocab_size""" ) else config.encoder.vocab_size
UpperCamelCase__ = random_input_ids(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCamelCase__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )[0]
UpperCamelCase__ = tf.gradients(UpperCamelCase__ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCamelCase__ = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )[0]
UpperCamelCase__ = tf.gradients(UpperCamelCase__ , model.trainable_variables )
return gradients
UpperCamelCase__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A ( self : Tuple , lowercase : Any ) -> str:
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(UpperCamelCase__ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCamelCase__ = timeit.repeat(
UpperCamelCase__ , repeat=self.args.repeat , number=1_0 , )
return min(UpperCamelCase__ ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn\'t fit on GPU. {e}" )
def A ( self : Union[str, Any] , lowercase : Tuple ) -> str:
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
UpperCamelCase__ = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won\'t log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
UpperCamelCase__ = 'N/A'
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
UpperCamelCase__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCamelCase__ = nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase__ )
UpperCamelCase__ = meminfo.used
UpperCamelCase__ = Memory(UpperCamelCase__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
UpperCamelCase__ = None
else:
UpperCamelCase__ = measure_peak_memory_cpu(UpperCamelCase__ )
UpperCamelCase__ = Memory(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCamelCase__ = stop_memory_tracing(UpperCamelCase__ )
if memory is None:
UpperCamelCase__ = summary.total
else:
UpperCamelCase__ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn\'t fit on GPU. {e}" )
return "N/A", None
| 709 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node) -> None:
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()
    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
lowerCamelCase_ : str = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
lowerCamelCase_ : Tuple = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
lowerCamelCase_ : str = ap.parse_args()
lowerCamelCase_ : List[str] = Path(args.readme_filepath)
lowerCamelCase_ : Tuple = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 265 | 0 |
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    # candidates are differences of consecutive cubes: (n + 1)^3 - n^3 = 3n^2 + 3n + 1
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""") | 360 |
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    # candidates are differences of consecutive cubes: (n + 1)^3 - n^3 = 3n^2 + 3n + 1
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""") | 360 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_a : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def UpperCamelCase__ ( _A: Any ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def UpperCamelCase__ ( _A: Optional[Any] , _A: Optional[Any] ):
'''simple docstring'''
if args.student_type == "roberta":
__lowerCamelCase = False
elif args.student_type == "gpt2":
__lowerCamelCase = False
def UpperCamelCase__ ( _A: Dict , _A: Any ):
'''simple docstring'''
if args.student_type == "roberta":
__lowerCamelCase = False
def UpperCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=_A , required=_A , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=_A , required=_A , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=_A , choices=["""distilbert""", """roberta""", """gpt2"""] , required=_A , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=_A , required=_A , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=_A , type=_A , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=_A , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=_A , required=_A , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=_A , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=_A , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=_A , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=_A , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=_A , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=_A , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=_A , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=_A , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=_A , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=_A , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=_A , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=_A , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=_A , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=_A , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_A , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=_A , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=_A , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5e-4 , type=_A , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=_A , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=_A , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=_A , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=_A , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=_A , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=_A , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=_A , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=_A , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=_A , default=4000 , help="""Checkpoint interval.""" )
__lowerCamelCase = parser.parse_args()
sanity_checks(_A )
# ARGS #
init_gpu_params(_A )
set_seed(_A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(f'''Param: {args}''' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(_A ) , _A , indent=4 )
git_log(args.dump_path )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = MODEL_CLASSES[args.student_type]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__lowerCamelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__lowerCamelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__lowerCamelCase = tokenizer.all_special_tokens.index(_A )
__lowerCamelCase = tokenizer.all_special_ids[idx]
logger.info(f'''Special tokens {special_tok_ids}''' )
__lowerCamelCase = special_tok_ids
__lowerCamelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'''Loading data from {args.data_file}''' )
with open(args.data_file , """rb""" ) as fp:
__lowerCamelCase = pickle.load(_A )
if args.mlm:
logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , """rb""" ) as fp:
__lowerCamelCase = pickle.load(_A )
__lowerCamelCase = np.maximum(_A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__lowerCamelCase = 0.0 # do not predict special tokens
__lowerCamelCase = torch.from_numpy(_A )
else:
__lowerCamelCase = None
__lowerCamelCase = LmSeqsDataset(params=_A , data=_A )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f'''Loading student config from {args.student_config}''' )
__lowerCamelCase = student_config_class.from_pretrained(args.student_config )
__lowerCamelCase = True
if args.student_pretrained_weights is not None:
logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__lowerCamelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=_A )
else:
__lowerCamelCase = student_model_class(_A )
if args.n_gpu > 0:
student.to(f'''cuda:{args.local_rank}''' )
logger.info("""Student loaded.""" )
# TEACHER #
__lowerCamelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_A )
if args.n_gpu > 0:
teacher.to(f'''cuda:{args.local_rank}''' )
logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_A , _A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_A , _A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__lowerCamelCase = Distiller(
params=_A , dataset=_A , token_probs=_A , student=_A , teacher=_A )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 571 | 0 |
"""simple docstring"""
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 308 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class a :
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
def __eq__( self , A_ ):
'''simple docstring'''
if not isinstance(A_ , A_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".' )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
        else:
            self.new_user_input = text
    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)
    def iter_texts(self):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
UpperCAmelCase , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class a ( UpperCAmelCase ):
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
super().__init__(*A_ , **A_ )
if self.tokenizer.pad_token_id is None:
_UpperCAmelCase : Union[str, Any] = self.tokenizer.eos_token
def _UpperCAmelCase ( self , A_=None , A_=None , A_=None , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = {}
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : Optional[int] = {}
if min_length_for_response is not None:
_UpperCAmelCase : Optional[Any] = min_length_for_response
if minimum_tokens is not None:
_UpperCAmelCase : Any = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCAmelCase : Dict = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase : int = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A_ , A_=0 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : str = super().__call__(A_ , num_workers=A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs
def _UpperCAmelCase ( self , A_ , A_=32 ):
'''simple docstring'''
if not isinstance(A_ , A_ ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
_UpperCAmelCase : Optional[Any] = self.tokenizer._build_conversation_input_ids(A_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCAmelCase : Optional[int] = self._legacy_parse_and_tokenize(A_ )
if self.framework == "pt":
_UpperCAmelCase : List[str] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_UpperCAmelCase : str = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _UpperCAmelCase ( self , A_ , A_=10 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : List[str] = generate_kwargs.get("max_length" , self.model.config.max_length )
_UpperCAmelCase : List[Any] = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
_UpperCAmelCase : int = max_length - minimum_tokens
_UpperCAmelCase : Optional[int] = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCAmelCase : Union[str, Any] = model_inputs["attention_mask"][:, -trim:]
_UpperCAmelCase : Optional[int] = model_inputs.pop("conversation" )
_UpperCAmelCase : Union[str, Any] = max_length
_UpperCAmelCase : Any = self.model.generate(**A_ , **A_ )
if self.model.config.is_encoder_decoder:
_UpperCAmelCase : Union[str, Any] = 1
else:
_UpperCAmelCase : List[str] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _UpperCAmelCase ( self , A_ , A_=True ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = model_outputs["output_ids"]
_UpperCAmelCase : List[Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
_UpperCAmelCase : Any = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(A_ )
return conversation
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : str = self.tokenizer.eos_token_id
_UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) )
if len(A_ ) > self.tokenizer.model_max_length:
_UpperCAmelCase : str = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 300 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : Any = TypeVar("""T""")
class __lowerCAmelCase( Generic[T] ):
def __init__( self : int , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[Any] = data
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self
SCREAMING_SNAKE_CASE_ :Dict = 0
class __lowerCAmelCase( Generic[T] ):
def __init__( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = {}
def _lowercase ( self : str , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Tuple = DisjointSetTreeNode(__lowerCAmelCase )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.map[data]
if elem_ref != elem_ref.parent:
SCREAMING_SNAKE_CASE_ :Any = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if nodea.rank > nodea.rank:
SCREAMING_SNAKE_CASE_ :List[str] = nodea
else:
SCREAMING_SNAKE_CASE_ :Optional[Any] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
self.link(self.find_set(__lowerCAmelCase ) , self.find_set(__lowerCAmelCase ) )
class __lowerCAmelCase( Generic[T] ):
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = {}
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
if node not in self.connections:
SCREAMING_SNAKE_CASE_ :Optional[Any] = {}
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
self.add_node(__lowerCAmelCase )
self.add_node(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ :Optional[Any] = weight
SCREAMING_SNAKE_CASE_ :int = weight
def _lowercase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = []
SCREAMING_SNAKE_CASE_ :int = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda SCREAMING_SNAKE_CASE : x[2] )
# creating the disjoint set
SCREAMING_SNAKE_CASE_ :List[Any] = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__lowerCAmelCase )
# MST generation
SCREAMING_SNAKE_CASE_ :int = 0
SCREAMING_SNAKE_CASE_ :Tuple = 0
SCREAMING_SNAKE_CASE_ :Union[str, Any] = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Tuple = edges[index]
index += 1
SCREAMING_SNAKE_CASE_ :Optional[int] = disjoint_set.find_set(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ :List[Any] = disjoint_set.find_set(__lowerCAmelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
disjoint_set.union(__lowerCAmelCase , __lowerCAmelCase )
return graph
| 707 |
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image.Image, level: int) -> Image.Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 233 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class lowerCAmelCase ( __snake_case ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
def __UpperCamelCase ( lowercase__ : str, lowercase__ : Any=0.999, lowercase__ : Any="cosine", ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ : Tuple ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__lowercase =[]
for i in range(_lowerCAmelCase ):
__lowercase =i / num_diffusion_timesteps
__lowercase =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCAmelCase ) / alpha_bar_fn(_lowerCAmelCase ), _lowerCAmelCase ) )
return torch.tensor(_lowerCAmelCase, dtype=torch.floataa )
class lowerCAmelCase ( __snake_case , __snake_case ):
@register_to_config
def __init__( self : int , __lowercase : Optional[Any] = 1000 , __lowercase : Optional[int] = "fixed_small_log" , __lowercase : Any = True , __lowercase : Dict = 1.0 , __lowercase : Tuple = "epsilon" , __lowercase : int = "squaredcos_cap_v2" , ):
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
__lowercase =betas_for_alpha_bar(A_ )
__lowercase =1.0 - self.betas
__lowercase =torch.cumprod(self.alphas , dim=0 )
__lowercase =torch.tensor(1.0 )
# standard deviation of the initial noise distribution
__lowercase =1.0
# setable values
__lowercase =None
__lowercase =torch.from_numpy(np.arange(0 , A_ )[::-1].copy() )
__lowercase =variance_type
def snake_case ( self : List[str] , __lowercase : Any , __lowercase : int = None ):
"""simple docstring"""
return sample
def snake_case ( self : int , __lowercase : List[str] , __lowercase : Tuple = None ):
"""simple docstring"""
__lowercase =num_inference_steps
__lowercase =(self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
__lowercase =(np.arange(0 , A_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
__lowercase =torch.from_numpy(A_ ).to(A_ )
def snake_case ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : List[str]=None , __lowercase : Optional[int]=None , __lowercase : Union[str, Any]=None ):
"""simple docstring"""
if prev_timestep is None:
__lowercase =t - 1
__lowercase =self.alphas_cumprod[t]
__lowercase =self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__lowercase =1 - alpha_prod_t
__lowercase =1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__lowercase =self.betas[t]
else:
__lowercase =1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__lowercase =beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
__lowercase =self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
__lowercase =torch.log(torch.clamp(A_ , min=1E-20 ) )
__lowercase =torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
__lowercase =variance.log()
__lowercase =beta.log()
__lowercase =(predicted_variance + 1) / 2
__lowercase =frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : List[str] , __lowercase : int = None , __lowercase : Tuple=None , __lowercase : int = True , ):
"""simple docstring"""
__lowercase =timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
__lowercase =torch.split(A_ , sample.shape[1] , dim=1 )
else:
__lowercase =None
# 1. compute alphas, betas
if prev_timestep is None:
__lowercase =t - 1
__lowercase =self.alphas_cumprod[t]
__lowercase =self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__lowercase =1 - alpha_prod_t
__lowercase =1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__lowercase =self.betas[t]
__lowercase =self.alphas[t]
else:
__lowercase =1 - alpha_prod_t / alpha_prod_t_prev
__lowercase =1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__lowercase =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__lowercase =model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__lowercase =torch.clamp(
A_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase =(alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
__lowercase =alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowercase =0
if t > 0:
__lowercase =randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=A_ , device=model_output.device )
__lowercase =self._get_variance(
A_ , predicted_variance=A_ , prev_timestep=A_ , )
if self.variance_type == "fixed_small_log":
__lowercase =variance
elif self.variance_type == "learned_range":
__lowercase =(0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
' for the UnCLIPScheduler.' )
__lowercase =variance * variance_noise
__lowercase =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=A_ , pred_original_sample=A_ )
def snake_case ( self : Dict , __lowercase : List[Any] , __lowercase : int , __lowercase : List[str] , ):
"""simple docstring"""
__lowercase =self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
__lowercase =timesteps.to(original_samples.device )
__lowercase =alphas_cumprod[timesteps] ** 0.5
__lowercase =sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
__lowercase =sqrt_alpha_prod.unsqueeze(-1 )
__lowercase =(1 - alphas_cumprod[timesteps]) ** 0.5
__lowercase =sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
__lowercase =sqrt_one_minus_alpha_prod.unsqueeze(-1 )
__lowercase =sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 119 |
def all_characters_unique(input_str: str) -> bool:
    # Track seen characters with a bitmap keyed by Unicode code point.
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 629 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(self.collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)
    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)
    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)
    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid), self._query_range(node.right, node.mid + 1, j), )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)
    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 96 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
_lowercase = logging.getLogger(__name__)
class __A ( A_ ):
UpperCamelCase :Optional[int] = '''token-classification'''
def __init__(self , __magic_name__ ):
if type(__magic_name__ ) == dict:
lowerCamelCase__ : Any = Namespace(**__magic_name__ )
lowerCamelCase__ : str = import_module("""tasks""" )
try:
lowerCamelCase__ : Optional[Any] = getattr(__magic_name__ , hparams.task_type )
lowerCamelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
lowerCamelCase__ : Any = self.token_classification_task.get_labels(hparams.labels )
lowerCamelCase__ : Tuple = CrossEntropyLoss().ignore_index
super().__init__(__magic_name__ , len(self.labels ) , self.mode )
def _snake_case (self , **__magic_name__ ):
return self.model(**__magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Tuple = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
lowerCamelCase__ : Union[str, Any] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCamelCase__ : List[str] = self(**__magic_name__ )
lowerCamelCase__ : Dict = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _snake_case (self ):
lowerCamelCase__ : Dict = self.hparams
for mode in ["train", "dev", "test"]:
lowerCamelCase__ : List[str] = self._feature_file(__magic_name__ )
if os.path.exists(__magic_name__ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __magic_name__ )
lowerCamelCase__ : Union[str, Any] = torch.load(__magic_name__ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
lowerCamelCase__ : int = self.token_classification_task.read_examples_from_file(args.data_dir , __magic_name__ )
lowerCamelCase__ : Tuple = self.token_classification_task.convert_examples_to_features(
__magic_name__ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , __magic_name__ )
torch.save(__magic_name__ , __magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ = False ):
lowerCamelCase__ : Any = self._feature_file(__magic_name__ )
logger.info("""Loading features from cached file %s""" , __magic_name__ )
lowerCamelCase__ : Optional[Any] = torch.load(__magic_name__ )
lowerCamelCase__ : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase__ : Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowerCamelCase__ : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowerCamelCase__ : int = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowerCamelCase__ : Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) , batch_size=__magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ ):
"""Compute validation""" ""
lowerCamelCase__ : Optional[int] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
lowerCamelCase__ : Tuple = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCamelCase__ : str = self(**__magic_name__ )
lowerCamelCase__ ,lowerCamelCase__ : List[Any] = outputs[:2]
lowerCamelCase__ : List[Any] = logits.detach().cpu().numpy()
lowerCamelCase__ : Dict = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case (self , __magic_name__ ):
lowerCamelCase__ : List[str] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
lowerCamelCase__ : Optional[int] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
lowerCamelCase__ : List[str] = np.argmax(__magic_name__ , axis=2 )
lowerCamelCase__ : Optional[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
lowerCamelCase__ : Optional[int] = dict(enumerate(self.labels ) )
lowerCamelCase__ : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase__ : Any = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowerCamelCase__ : Tuple = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(__magic_name__ , __magic_name__ ),
"""precision""": precision_score(__magic_name__ , __magic_name__ ),
"""recall""": recall_score(__magic_name__ , __magic_name__ ),
"""f1""": fa_score(__magic_name__ , __magic_name__ ),
}
lowerCamelCase__ : Dict = dict(results.items() )
lowerCamelCase__ : str = results
return ret, preds_list, out_label_list
def _snake_case (self , __magic_name__ ):
# when stable
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : str = self._eval_end(__magic_name__ )
lowerCamelCase__ : Union[str, Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case (self , __magic_name__ ):
# updating to test_epoch_end instead of deprecated test_end
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : Union[str, Any] = self._eval_end(__magic_name__ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowerCamelCase__ : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case (__magic_name__ , __magic_name__ ):
# Add NER specific options
BaseTransformer.add_model_specific_args(__magic_name__ , __magic_name__ )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=__magic_name__ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__magic_name__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=__magic_name__ , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__magic_name__ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_lowercase = NERTransformer.add_model_specific_args(parser, os.getcwd())
_lowercase = parser.parse_args()
_lowercase = NERTransformer(args)
_lowercase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_lowercase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
_lowercase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 96 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=[] ):
__a = size[0] - overlap_pixels * 2
__a = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__a = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
__a = np.pad(__lowerCamelCase , mode='linear_ramp' , pad_width=__lowerCamelCase , end_values=0 )
if "l" in remove_borders:
__a = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__a = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__a = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__a = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
return max(__lowerCamelCase , min(__lowerCamelCase , __lowerCamelCase ) )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = list(__lowerCamelCase )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__a = clamp_rect(__lowerCamelCase , [0, 0] , [image_size[0], image_size[1]] )
return rect
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(__lowerCamelCase , (original_slice, 0) )
return result
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
__a = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__a = tile.crop(__lowerCamelCase )
return tile
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
__a = n % d
return n - divisor
class a__ ( __snake_case ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 3_5_0 , ) -> List[str]:
super().__init__(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , max_noise_level=UpperCAmelCase , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> int:
torch.manual_seed(0 )
__a = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__a = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size )
__a = image.crop(UpperCAmelCase )
__a = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__a = translated_slice_x - (original_image_slice / 2)
__a = max(0 , UpperCAmelCase )
__a = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__a = to_input.size
__a = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__a = super(UpperCAmelCase , self ).__call__(image=UpperCAmelCase , **UpperCAmelCase ).images[0]
__a = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__a = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase )
__a = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__a = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__a = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode='L' , )
final_image.paste(
UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 7_5 , UpperCAmelCase = 9.0 , UpperCAmelCase = 5_0 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 1_2_8 , UpperCAmelCase = 3_2 , UpperCAmelCase = 3_2 , ) -> Optional[Any]:
__a = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__a = math.ceil(image.size[0] / tile_size )
__a = math.ceil(image.size[1] / tile_size )
__a = tcx * tcy
__a = 0
for y in range(UpperCAmelCase ):
for x in range(UpperCAmelCase ):
self._process_tile(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , prompt=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , noise_level=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def lowerCAmelCase( ):
# Run a demo
__a = 'stabilityai/stable-diffusion-x4-upscaler'
__a = StableDiffusionTiledUpscalePipeline.from_pretrained(__lowerCamelCase , revision='fp16' , torch_dtype=torch.floataa )
__a = pipe.to('cuda' )
__a = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(__lowerCamelCase ):
print(f'''progress: {obj["progress"]:.4f}''' )
obj["image"].save('diffusers_library_progress.jpg' )
__a = pipe(image=__lowerCamelCase , prompt='Black font, white background, vector' , noise_level=40 , callback=__lowerCamelCase )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 559 | # limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class a__ ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> Tuple:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[torch.Generator] = None , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,), "This is a local test"
        return ImagePipelineOutput(images=image ), "This is a local test"
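# Hedged usage sketch for the local test pipeline above. The checkpoint id is an illustrative
# assumption (any repository that provides compatible `unet` and `scheduler` components works);
# it is not prescribed by this file.
if __name__ == "__main__":
    pipeline = a__.from_pretrained("google/ddpm-cifar10-32")  # assumed checkpoint
    output, note = pipeline(batch_size=1, num_inference_steps=50)
    output.images[0].save("sample.png")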
| 559 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_lowerCAmelCase : Dict = ["small", "medium", "large"]
_lowerCAmelCase : Dict = "lm_head.decoder.weight"
_lowerCAmelCase : List[Any] = "lm_head.weight"
def lowerCAmelCase ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ = torch.load(__snake_case )
UpperCAmelCase__ = d.pop(__snake_case )
os.makedirs(__snake_case , exist_ok=__snake_case )
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
_lowerCAmelCase : str = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_lowerCAmelCase : str = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
_lowerCAmelCase : List[str] = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 712 |
import torch
from transformers import AutoModel
class _UpperCamelCase ( torch.nn.Module ):
def __init__( self :str , lowerCamelCase :Tuple="sayef/fsner-bert-base-uncased" ) -> int:
super(lowerCamelCase , self ).__init__()
UpperCAmelCase__ = AutoModel.from_pretrained(lowerCamelCase , return_dict=lowerCamelCase )
UpperCAmelCase__ = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCAmelCase__ = torch.nn.Softmax(dim=1 )
def UpperCAmelCase_ ( self :Union[str, Any] , **lowerCamelCase :Tuple ) -> Dict:
return self.bert(**lowerCamelCase ).last_hidden_state
def UpperCAmelCase_ ( self :Any , lowerCamelCase :Union[str, Any] ) -> Union[str, Any]:
return token_embeddings.sum(2 , keepdim=lowerCamelCase )
def UpperCAmelCase_ ( self :Dict , lowerCamelCase :List[Any] , lowerCamelCase :int , lowerCamelCase :Union[str, Any]=1 ) -> Dict:
return self.softmax(T * self.cos(lowerCamelCase , lowerCamelCase ) )
def UpperCAmelCase_ ( self :Union[str, Any] , lowerCamelCase :Any , lowerCamelCase :Any ) -> Union[str, Any]:
UpperCAmelCase__ = W_supports["sizes"].tolist()
UpperCAmelCase__ = W_supports["start_token_id"].item()
UpperCAmelCase__ = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCAmelCase__ = self.BERT(**lowerCamelCase )
UpperCAmelCase__ = self.BERT(**lowerCamelCase )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = W_supports["input_ids"] == start_token_id
UpperCAmelCase__ = W_supports["input_ids"] == end_token_id
for i, size in enumerate(lowerCamelCase ):
if i == 0:
UpperCAmelCase__ = 0
else:
UpperCAmelCase__ = support_sizes[i - 1]
UpperCAmelCase__ = S[s : s + size][start_token_masks[s : s + size]]
UpperCAmelCase__ = S[s : s + size][end_token_masks[s : s + size]]
UpperCAmelCase__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCAmelCase__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCAmelCase__ = torch.vstack((p_starts, p_start) )
UpperCAmelCase__ = torch.vstack((p_ends, p_end) )
else:
UpperCAmelCase__ = p_start
UpperCAmelCase__ = p_end
return p_starts, p_ends
| 364 | 0 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 13 |
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters; shorter words are left unchanged."""
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 299 | 0 |
class snake_case_ :
'''simple docstring'''
def __init__( self, A_ ) -> None:
UpperCAmelCase__ =size
UpperCAmelCase__ =[0] * size
UpperCAmelCase__ =[0] * size
@staticmethod
def __UpperCAmelCase ( A_ ) -> int:
return index | (index + 1)
@staticmethod
def __UpperCAmelCase ( A_ ) -> int:
return (index & (index + 1)) - 1
def __UpperCAmelCase ( self, A_, A_ ) -> None:
UpperCAmelCase__ =value
while index < self.size:
UpperCAmelCase__ =self.get_prev(A_ ) + 1
if current_left_border == index:
UpperCAmelCase__ =value
else:
UpperCAmelCase__ =max(A_, A_, A_ )
UpperCAmelCase__ =self.get_next(A_ )
def __UpperCAmelCase ( self, A_, A_ ) -> int:
right -= 1 # Because of right is exclusive
UpperCAmelCase__ =0
while left <= right:
UpperCAmelCase__ =self.get_prev(A_ )
if left <= current_left:
UpperCAmelCase__ =max(A_, self.tree[right] )
UpperCAmelCase__ =current_left
else:
UpperCAmelCase__ =max(A_, self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
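    # Hedged usage sketch for the range-maximum structure above, under the assumption that the
    # constructor takes a size, update(index, value) writes one position, and query(left, right)
    # returns the maximum over the half-open range [left, right):
    # tree = snake_case_(5)
    # tree.update(2, 7)
    # tree.update(4, 3)
    # print(tree.query(0, 5))  # 7 under the stated assumptions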
| 700 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'blip_text_model'
def __init__( self, A_=3_0524, A_=768, A_=768, A_=3072, A_=768, A_=12, A_=8, A_=512, A_="gelu", A_=1E-12, A_=0.0, A_=0.0, A_=0.02, A_=3_0522, A_=2, A_=0, A_=102, A_=True, A_=True, **A_, ) -> Any:
super().__init__(
pad_token_id=A_, bos_token_id=A_, eos_token_id=A_, sep_token_id=A_, **A_, )
UpperCAmelCase__ =vocab_size
UpperCAmelCase__ =hidden_size
UpperCAmelCase__ =encoder_hidden_size
UpperCAmelCase__ =intermediate_size
UpperCAmelCase__ =projection_dim
UpperCAmelCase__ =hidden_dropout_prob
UpperCAmelCase__ =num_hidden_layers
UpperCAmelCase__ =num_attention_heads
UpperCAmelCase__ =max_position_embeddings
UpperCAmelCase__ =layer_norm_eps
UpperCAmelCase__ =hidden_act
UpperCAmelCase__ =initializer_range
UpperCAmelCase__ =attention_probs_dropout_prob
UpperCAmelCase__ =is_decoder
UpperCAmelCase__ =use_cache
@classmethod
def __UpperCAmelCase ( cls, A_, **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
UpperCAmelCase__ , UpperCAmelCase__ =cls.get_config_dict(A_, **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
UpperCAmelCase__ =config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A_, **A_ )
class BlipVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'blip_vision_model'
def __init__( self, A_=768, A_=3072, A_=512, A_=12, A_=12, A_=384, A_=16, A_="gelu", A_=1E-5, A_=0.0, A_=1E-10, **A_, ) -> Dict:
super().__init__(**A_ )
UpperCAmelCase__ =hidden_size
UpperCAmelCase__ =intermediate_size
UpperCAmelCase__ =projection_dim
UpperCAmelCase__ =num_hidden_layers
UpperCAmelCase__ =num_attention_heads
UpperCAmelCase__ =patch_size
UpperCAmelCase__ =image_size
UpperCAmelCase__ =initializer_range
UpperCAmelCase__ =attention_dropout
UpperCAmelCase__ =layer_norm_eps
UpperCAmelCase__ =hidden_act
@classmethod
def __UpperCAmelCase ( cls, A_, **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
UpperCAmelCase__ , UpperCAmelCase__ =cls.get_config_dict(A_, **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
UpperCAmelCase__ =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A_, **A_ )
class BlipConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'blip'
    is_composition = True
def __init__( self, A_=None, A_=None, A_=512, A_=2.65_92, A_=256, **A_, ) -> str:
super().__init__(**A_ )
if text_config is None:
UpperCAmelCase__ ={}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
UpperCAmelCase__ ={}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
UpperCAmelCase__ =BlipTextConfig(**A_ )
UpperCAmelCase__ =BlipVisionConfig(**A_ )
UpperCAmelCase__ =self.vision_config.hidden_size
UpperCAmelCase__ =projection_dim
UpperCAmelCase__ =logit_scale_init_value
UpperCAmelCase__ =1.0
UpperCAmelCase__ =0.02
UpperCAmelCase__ =image_text_hidden_size
@classmethod
def __UpperCAmelCase ( cls, A_, A_, **A_ ) -> Tuple:
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **A_ )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ =copy.deepcopy(self.__dict__ )
UpperCAmelCase__ =self.text_config.to_dict()
UpperCAmelCase__ =self.vision_config.to_dict()
UpperCAmelCase__ =self.__class__.model_type
return output
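# Hedged usage sketch for the composite config above. It assumes the upstream keyword interface
# (e.g. hidden_size, num_hidden_layers) and dict-valued sub-configs; none of these values are
# prescribed by this file.
# text_config = BlipTextConfig()
# vision_config = BlipVisionConfig()
# config = BlipConfig(text_config=text_config.to_dict(), vision_config=vision_config.to_dict())
# print(config.to_dict()["text_config"]["model_type"])  # 'blip_text_model'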
| 510 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _UpperCAmelCase ( self , a__=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) )
A = np.random.RandomState(a__ )
A = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _UpperCAmelCase ( self ) -> int:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=a__ )
A = self.get_dummy_inputs()
A = pipe(**a__ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> List[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
A = self.get_dummy_inputs()
A = pipe(**a__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> int:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**a__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> int:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
A = self.get_dummy_inputs()
A = pipe(**a__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
A = self.get_dummy_inputs()
A = pipe(**a__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> List[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
A = self.get_dummy_inputs()
A = pipe(**a__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> List[str]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCAmelCase ( self ) -> Dict:
A = ort.SessionOptions()
A = False
return options
def _UpperCAmelCase ( self ) -> int:
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
A = """A fantasy landscape, trending on artstation"""
A = np.random.RandomState(0 )
A = pipe(
prompt=a__ , image=a__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type="""np""" , )
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _UpperCAmelCase ( self ) -> List[Any]:
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
A = """A fantasy landscape, trending on artstation"""
A = np.random.RandomState(0 )
A = pipe(
prompt=a__ , image=a__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type="""np""" , )
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 641 |
_lowercase : Dict = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 641 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
A : int = IFInpaintingPipeline
A : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
A : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A : int = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case__ ( self : int ):
return self._get_dummy_components()
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=0 ):
if str(_lowerCAmelCase ).startswith("""mps""" ):
__snake_case : Union[str, Any] = torch.manual_seed(_lowerCAmelCase )
else:
__snake_case : int = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__snake_case : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__snake_case : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__snake_case : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case__ ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def snake_case__ ( self : str ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def snake_case__ ( self : int ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case__ ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case__ ( self : List[str] ):
self._test_save_load_local()
def snake_case__ ( self : int ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 390 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    '''Return the sum of the fifth powers of the digits of ``number``.'''
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution() -> int:
    '''Sum all numbers below one million that equal the sum of the fifth powers of their digits.'''
    return sum(
        number
        for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
| 390 | 1 |
"""simple docstring"""
import math
def A_ (__a ):
'''simple docstring'''
A_ = [True] * n
A_ = False
A_ = False
A_ = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
A_ = i * 2
while index < n:
A_ = False
A_ = index + i
A_ = [2]
for i in range(3 , __a , 2 ):
if is_prime[i]:
primes.append(__a )
return primes
def A_ (__a = 9999_6666_3333 ):
'''simple docstring'''
A_ = math.floor(math.sqrt(__a ) ) + 100
A_ = prime_sieve(__a )
A_ = 0
A_ = 0
A_ = primes[prime_index]
while (last_prime**2) <= limit:
A_ = primes[prime_index + 1]
A_ = last_prime**2
A_ = next_prime**2
# Get numbers divisible by lps(current)
A_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
A_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
A_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
A_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 115 |
"""simple docstring"""
import argparse
import os
import re
UpperCamelCase_ : Any = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCamelCase_ : Optional[int] = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCamelCase_ : Tuple = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def A_ (__a , __a = False ):
'''simple docstring'''
with open(__a , "r" , encoding="utf-8" ) as f:
A_ = f.read()
A_ = content.split("\n" )
A_ = []
A_ = 0
while line_idx < len(__a ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
A_ = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
A_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
A_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
A_ = sorted(__a , key=lambda __a : _re_identifier.search(__a ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(__a , "w" , encoding="utf-8" ) as f:
f.write("\n".join(__a ) )
elif "\n".join(__a ) != content:
return True
def A_ (__a = False ):
'''simple docstring'''
A_ = [os.path.join(__a , __a ) for f in os.listdir(__a ) if f.endswith(".py" )]
A_ = [sort_auto_mapping(__a , overwrite=__a ) for fname in fnames]
if not overwrite and any(__a ):
A_ = [f for f, d in zip(__a , __a ) if d]
raise ValueError(
f'The following files have auto mappings that need sorting: {", ".join(__a )}. Run `make style` to fix'
" this." )
if __name__ == "__main__":
UpperCamelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCamelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 115 | 1 |
'''simple docstring'''
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2) reference: for each element, scan to the right for the first strictly larger value."""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same quadratic idea, written with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr ):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item )
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution in O(n): traverse from the right, popping smaller elements."""
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 710 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a__ = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
a__ = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
a__ = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowercase ( self , _a , _a , _a=4 , _a=False ) -> List[Any]:
_a : Dict = compute_bleu(
reference_corpus=_a , translation_corpus=_a , max_order=_a , smooth=_a )
((_a) , (_a) , (_a) , (_a) , (_a) , (_a)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 578 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A , __A =FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=lowercase__ , dtype=jnp.bfloataa )
__A , __A =FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowercase__ , from_pt=lowercase__ , dtype=jnp.bfloataa )
__A =controlnet_params
__A ='''bird'''
__A =jax.device_count()
__A =pipe.prepare_text_inputs([prompts] * num_samples )
__A =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
__A =pipe.prepare_image_inputs([canny_image] * num_samples )
__A =jax.random.PRNGKey(0 )
__A =jax.random.split(lowercase__ , jax.device_count() )
__A =replicate(lowercase__ )
__A =shard(lowercase__ )
__A =shard(lowercase__ )
__A =pipe(
prompt_ids=lowercase__ , image=lowercase__ , params=lowercase__ , prng_seed=lowercase__ , num_inference_steps=5_0 , jit=lowercase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
__A =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__A =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__A =jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A =jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self ):
'''simple docstring'''
__A , __A =FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=lowercase__ , dtype=jnp.bfloataa )
__A , __A =FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowercase__ , from_pt=lowercase__ , dtype=jnp.bfloataa )
__A =controlnet_params
__A ='''Chef in the kitchen'''
__A =jax.device_count()
__A =pipe.prepare_text_inputs([prompts] * num_samples )
__A =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
__A =pipe.prepare_image_inputs([pose_image] * num_samples )
__A =jax.random.PRNGKey(0 )
__A =jax.random.split(lowercase__ , jax.device_count() )
__A =replicate(lowercase__ )
__A =shard(lowercase__ )
__A =shard(lowercase__ )
__A =pipe(
prompt_ids=lowercase__ , image=lowercase__ , params=lowercase__ , prng_seed=lowercase__ , num_inference_steps=5_0 , jit=lowercase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
__A =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__A =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__A =jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A =jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 184 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( __A : List[str] , __A : Optional[Any] , __A : Union[str, Any] , __A : int="attention" ) ->str:
__A =params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
__A =params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
__A =params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
__A =params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def A__ ( __A : Optional[int] , __A : List[str] , __A : Any , __A : Tuple=False ) ->Any:
if split_mlp_wi:
__A =params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
__A =params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
__A =(wi_a, wi_a)
else:
__A =params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
__A =params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def A__ ( __A : int , __A : Any , __A : Any , __A : Optional[Any] ) ->str:
return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def A__ ( __A : dict , *, __A : int , __A : bool ) ->Optional[Any]:
__A =traverse_util.flatten_dict(variables['''target'''] )
__A ={'''/'''.join(__A ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__A ='''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , __A )
__A =collections.OrderedDict()
# Shared embeddings.
__A =old['''token_embedder/embedding''']
# Encoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
__A =tax_layer_norm_lookup(__A , __A , '''encoder''' , '''pre_attention_layer_norm''' )
__A , __A , __A , __A =tax_attention_lookup(__A , __A , '''encoder''' , '''attention''' )
__A =layer_norm
__A =k.T
__A =o.T
__A =q.T
__A =v.T
# Block i, layer 1 (MLP).
__A =tax_layer_norm_lookup(__A , __A , '''encoder''' , '''pre_mlp_layer_norm''' )
__A , __A =tax_mlp_lookup(__A , __A , '''encoder''' , __A )
__A =layer_norm
if split_mlp_wi:
__A =wi[0].T
__A =wi[1].T
else:
__A =wi.T
__A =wo.T
__A =old[
'''encoder/relpos_bias/rel_embedding'''
].T
__A =old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
__A =tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_self_attention_layer_norm''' )
__A , __A , __A , __A =tax_attention_lookup(__A , __A , '''decoder''' , '''self_attention''' )
__A =layer_norm
__A =k.T
__A =o.T
__A =q.T
__A =v.T
# Block i, layer 1 (Cross Attention).
__A =tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_cross_attention_layer_norm''' )
__A , __A , __A , __A =tax_attention_lookup(__A , __A , '''decoder''' , '''encoder_decoder_attention''' )
__A =layer_norm
__A =k.T
__A =o.T
__A =q.T
__A =v.T
# Block i, layer 2 (MLP).
__A =tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_mlp_layer_norm''' )
__A , __A =tax_mlp_lookup(__A , __A , '''decoder''' , __A )
__A =layer_norm
if split_mlp_wi:
__A =wi[0].T
__A =wi[1].T
else:
__A =wi.T
__A =wo.T
__A =old['''decoder/decoder_norm/scale''']
__A =old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__A =old['''decoder/logits_dense/kernel'''].T
return new
def A__ ( __A : Union[str, Any] , __A : bool ) ->Any:
__A =collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__A =state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__A =state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
__A =state_dict['''shared.weight''']
return state_dict
def A__ ( __A : str , __A : Optional[int] , __A : int , __A : Optional[Any] ) ->Tuple:
__A =checkpoints.load_tax_checkpoint(__A )
__A =convert_tax_to_pytorch(__A , num_layers=config.num_layers , is_encoder_only=__A )
__A =make_state_dict(__A , __A )
model.load_state_dict(__A , strict=__A )
def A__ ( __A : List[str] , __A : str , __A : str , __A : bool = False ) ->List[str]:
__A =TaConfig.from_json_file(__A )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__A =TaEncoderModel(__A )
else:
__A =TaForConditionalGeneration(__A )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__A , __A , __A , __A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__A )
# Verify that we can load the checkpoint.
model.from_pretrained(__A )
print('''Done''' )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
_lowerCamelCase : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 184 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = KandinskyVaaPriorPipeline
__lowerCamelCase : List[str] = ['prompt']
__lowerCamelCase : str = ['prompt', 'negative_prompt']
__lowerCamelCase : Union[str, Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase : Union[str, Any] = False
@property
def a__ (self ) -> Tuple:
"""simple docstring"""
return 32
@property
def a__ (self ) -> int:
"""simple docstring"""
return 32
@property
def a__ (self ) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def a__ (self ) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def a__ (self ) -> str:
"""simple docstring"""
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def a__ (self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(A )
@property
def a__ (self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
_a = PriorTransformer(**A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def a__ (self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
_a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_a = CLIPVisionModelWithProjection(A )
return model
@property
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = CLIPImageProcessor(
crop_size=224 , do_center_crop=A , do_normalize=A , do_resize=A , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.dummy_prior
_a = self.dummy_image_encoder
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_image_processor
_a = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=A , clip_sample_range=10.0 , )
_a = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def a__ (self , A , A=0 ) -> Optional[int]:
"""simple docstring"""
if str(A ).startswith('''mps''' ):
_a = torch.manual_seed(A )
else:
_a = torch.Generator(device=A ).manual_seed(A )
_a = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = '''cpu'''
_a = self.get_dummy_components()
_a = self.pipeline_class(**A )
_a = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_a = pipe(**self.get_dummy_inputs(A ) )
_a = output.image_embeds
_a = pipe(
**self.get_dummy_inputs(A ) , return_dict=A , )[0]
_a = image[0, -10:]
_a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = torch_device == '''cpu'''
_a = True
_a = False
self._test_inference_batch_single_identical(
test_max_difference=A , relax_max_difference=A , test_mean_pixel_difference=A , )
@skip_mps
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = torch_device == '''cpu'''
_a = False
self._test_attention_slicing_forward_pass(
test_max_difference=A , test_mean_pixel_difference=A , )
| 715 |
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (given as an adjacency dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk)
        for node in graph)
def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set) -> bool:
    """Depth-first search that reports a back edge reachable from ``vertex``."""
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
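    # Quick illustrative check for check_cycle (graphs as adjacency dicts; values chosen arbitrarily).
    print(check_cycle({0: [1], 1: [2], 2: [0]}))  # 0 -> 1 -> 2 -> 0 is a cycle, expect True
    print(check_cycle({0: [1], 1: [2], 2: []}))   # acyclic chain, expect False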
| 352 | 0 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    '''Real (active) power: P = S * power_factor, with power_factor = cos(phi).'''
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    '''Reactive power: Q = S * sqrt(1 - power_factor**2).'''
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
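    # Illustrative values (assumed, not taken from this file): a 100 VA load at power factor 0.9.
    print(real_power(100, 0.9))      # 90.0 W of real power
    print(reactive_power(100, 0.9))  # ~43.59 VAR of reactive power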
| 493 |
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
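if __name__ == "__main__":
    # Minimal sanity check. This file defines no node type, so a tiny singly linked node class is
    # assumed here purely for illustration.
    class _Node:
        def __init__(self, val, next_node=None):
            self.val = val
            self.next = next_node
    head = _Node(1, _Node(2, _Node(2, _Node(1))))
    print(is_palindrome_stack(head))  # True
    print(is_palindrome_dict(head))   # True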
| 171 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester( ConfigTester ):
"""simple docstring"""
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A_ , "embed_dim" ) )
self.parent.assertTrue(hasattr(A_ , "num_heads" ) )
class TFCvtModelTester :
"""simple docstring"""
def __init__( self : Tuple , A_ : int , A_ : Dict=1_3 , A_ : int=6_4 , A_ : str=3 , A_ : Optional[int]=[1_6, 4_8, 9_6] , A_ : int=[1, 3, 6] , A_ : Optional[int]=[1, 2, 1_0] , A_ : Any=[7, 3, 3] , A_ : Tuple=[4, 2, 2] , A_ : str=[2, 1, 1] , A_ : Optional[Any]=[2, 2, 2] , A_ : Union[str, Any]=[False, False, True] , A_ : Union[str, Any]=[0.0, 0.0, 0.0] , A_ : Any=0.02 , A_ : Optional[int]=1E-12 , A_ : str=True , A_ : List[Any]=True , A_ : Union[str, Any]=2 , ):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : Optional[Any] = patch_sizes
_lowerCAmelCase : Optional[int] = patch_stride
_lowerCAmelCase : List[str] = patch_padding
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : Dict = num_labels
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = num_heads
_lowerCAmelCase : Union[str, Any] = stride_kv
_lowerCAmelCase : Tuple = depth
_lowerCAmelCase : List[str] = cls_token
_lowerCAmelCase : Tuple = attention_drop_rate
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : int = None
if self.use_labels:
# create a random int32 tensor of given shape
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Union[str, Any] , A_ : str , A_ : Optional[int] , A_ : str ):
'''simple docstring'''
_lowerCAmelCase : str = TFCvtModel(config=A_ )
_lowerCAmelCase : str = model(A_ , training=A_ )
_lowerCAmelCase : List[Any] = (self.image_size, self.image_size)
_lowerCAmelCase , _lowerCAmelCase : Any = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_lowerCAmelCase : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_lowerCAmelCase : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __magic_name__ ( self : Any , A_ : Tuple , A_ : int , A_ : Any ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : Optional[Any] = TFCvtForImageClassification(A_ )
_lowerCAmelCase : Optional[Any] = model(A_ , labels=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = config_and_inputs
_lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( A , A , unittest.TestCase ):
"""simple docstring"""
_lowercase : Any = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
_lowercase : Tuple = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
_lowercase : Optional[int] = False
_lowercase : Any = False
_lowercase : int = False
_lowercase : int = False
_lowercase : Tuple = False
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFCvtModelTester(self )
_lowerCAmelCase : Optional[Any] = TFCvtConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=3_7 )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def __magic_name__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __magic_name__ ( self : int ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __magic_name__ ( self : Dict ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(A_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def __magic_name__ ( self : str ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = model_class(A_ )
_lowerCAmelCase : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __magic_name__ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(A_ : Any , A_ : Any , A_ : Dict ):
_lowerCAmelCase : Tuple = model_class(A_ )
_lowerCAmelCase : List[Any] = model(**self._prepare_for_class(A_ , A_ ) )
_lowerCAmelCase : Tuple = outputs.hidden_states
_lowerCAmelCase : Optional[Any] = len(self.model_tester.depth )
self.assertEqual(len(A_ ) , A_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : List[str] = True
check_hidden_states_output(A_ , A_ , A_ )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def __magic_name__ ( self : Dict ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFCvtModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img() -> "Image.Image":
"""simple docstring"""
_lowerCAmelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ ( self : str ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __magic_name__ ( self : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : int = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : Optional[Any] = image_processor(images=A_ , return_tensors="tf" )
# forward pass
_lowerCAmelCase : str = model(**A_ )
# verify the logits
_lowerCAmelCase : List[str] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A_ )
_lowerCAmelCase : Any = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A_ , atol=1E-4 ) )
| 503 | 0 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # ctypes.Structure requires the field layout to be declared as `_fields_`
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()
def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def hide():
try:
hide_cursor()
yield
finally:
show_cursor()
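

# Minimal usage sketch (illustrative, not part of the original module): wrap a
# long-running render loop so the terminal cursor stays hidden while it runs and
# is restored afterwards, even if an exception is raised.
#
#   with hide():
#       draw_menu()  # hypothetical rendering function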
| 368 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
A : List[Any] = True
from torch.cuda.amp import autocast
A : Any = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : str =field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCAmelCase : Optional[str] =field(
default=lowerCAmelCase__ ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
__UpperCAmelCase : Optional[bool] =field(
default=lowerCAmelCase__ ,metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__UpperCAmelCase : Optional[bool] =field(
default=lowerCAmelCase__ ,metadata={"""help""": """Whether to log verbose messages or not."""} ,)
__UpperCAmelCase : Optional[float] =field(
default=2.0 ,metadata={"""help""": """Maximum temperature for gumbel softmax."""} )
__UpperCAmelCase : Optional[float] =field(
default=0.5 ,metadata={"""help""": """Minimum temperature for gumbel softmax."""} )
__UpperCAmelCase : Optional[float] =field(
default=0.999_995 ,metadata={"""help""": """Decay of gumbel temperature during training."""} )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCAmelCase = logging.WARNING
if model_args.verbose_logging:
__lowerCAmelCase = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
__lowerCAmelCase = logging.INFO
logger.setLevel(_UpperCamelCase )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : str =field(
default=lowerCAmelCase__ ,metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
__UpperCAmelCase : Optional[str] =field(
default=lowerCAmelCase__ ,metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__UpperCAmelCase : Optional[str] =field(
default="""train""" ,metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} ,)
__UpperCAmelCase : Optional[str] =field(
default="""validation""" ,metadata={
"""help""": (
"""The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"""
)
} ,)
__UpperCAmelCase : Optional[str] =field(
default="""file""" ,metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""} ,)
__UpperCAmelCase : bool =field(
default=lowerCAmelCase__ ,metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
__UpperCAmelCase : Optional[int] =field(
default=1 ,metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} ,)
__UpperCAmelCase : Optional[int] =field(
default=lowerCAmelCase__ ,metadata={"""help""": """The number of processes to use for the preprocessing."""} ,)
__UpperCAmelCase : Optional[float] =field(
default=20.0 ,metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : WavaVecaForPreTraining
__UpperCAmelCase : WavaVecaFeatureExtractor
__UpperCAmelCase : Union[bool, str] ="longest"
__UpperCAmelCase : Optional[int] =None
__UpperCAmelCase : Optional[int] =None
def __call__( self , __a ):
# reformat list to dict and set to pytorch format
__lowerCAmelCase = self.feature_extractor.pad(
__a , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
__lowerCAmelCase = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
__lowerCAmelCase = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
__lowerCAmelCase = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
__lowerCAmelCase = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
__lowerCAmelCase = 1
__lowerCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
__lowerCAmelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__a , min_masks=2 , )
return batch
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__a , __a=1 , __a=0 , __a=1.0 , **__a ):
super().__init__(*__a , **__a )
__lowerCAmelCase = 0
__lowerCAmelCase = max_gumbel_temp
__lowerCAmelCase = min_gumbel_temp
__lowerCAmelCase = gumbel_temp_decay
def snake_case ( self , __a , __a ):
model.train()
__lowerCAmelCase = self._prepare_inputs(__a )
if self.use_amp:
with autocast():
__lowerCAmelCase = self.compute_loss(__a , __a )
else:
__lowerCAmelCase = self.compute_loss(__a , __a )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
__lowerCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__lowerCAmelCase = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
if self.args.gradient_accumulation_steps > 1:
__lowerCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a ).backward()
elif self.use_apex:
with amp.scale_loss(__a , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
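    # Temperature schedule note: after `n` calls to `training_step` the gumbel
    # temperature becomes max(max_gumbel_temp * gumbel_temp_decay ** n, min_gumbel_temp).
    # With the default arguments declared above (max 2.0, decay 0.999995, min 0.5),
    # 0.999995 ** n ~ exp(-5e-6 * n), so the 0.5 floor is reached after roughly
    # 277k update steps.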
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
configure_logger(_UpperCamelCase , _UpperCamelCase )
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
__lowerCAmelCase = DatasetDict()
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
__lowerCAmelCase = DatasetDict()
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
__lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_UpperCamelCase )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , batch["sampling_rate"] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
# load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
__lowerCAmelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
__lowerCAmelCase = WavaVecaForPreTraining(_UpperCamelCase )
__lowerCAmelCase = DataCollatorForWavaVecaPretraining(model=_UpperCamelCase , feature_extractor=_UpperCamelCase )
__lowerCAmelCase = WavaVecaPreTrainer(
model=_UpperCamelCase , data_collator=_UpperCamelCase , args=_UpperCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_UpperCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
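# Illustrative launch command. Flag names are assumed from the field metadata in the
# dataclasses above plus the standard `TrainingArguments`; the script file name, model
# identifier and paths are placeholders:
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path <pretrained-wav2vec2-checkpoint> \
#       --dataset_name <hub-dataset> \
#       --output_dir ./wav2vec2-pretrained \
#       --max_duration_in_seconds 20.0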
| 636 | 0 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase__ = {
'Salesforce/codegen-350M-mono': 2048,
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase: Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase: List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase: Tuple = ['input_ids', 'attention_mask']
__lowerCamelCase: Tuple = CodeGenTokenizer
def __init__( self : Dict , a : Optional[Any]=None , a : Any=None , a : List[Any]=None , a : int="<|endoftext|>" , a : Optional[Any]="<|endoftext|>" , a : Union[str, Any]="<|endoftext|>" , a : Any=False , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
a , a , tokenizer_file=a , unk_token=a , bos_token=a , eos_token=a , add_prefix_space=a , **a , )
if kwargs.pop("add_bos_token" , a ):
            model_id = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
lowercase_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a ) != add_prefix_space:
lowercase_ : List[Any] = getattr(a , pre_tok_state.pop("type" ) )
lowercase_ : Any = add_prefix_space
lowercase_ : List[Any] = pre_tok_class(**a )
lowercase_ : int = add_prefix_space
def lowerCAmelCase__ ( self : Tuple , *a : List[str] , **a : List[Any] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = kwargs.get("is_split_into_words" , a )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a , **a )
def lowerCAmelCase__ ( self : Dict , *a : Optional[Any] , **a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Dict = kwargs.get("is_split_into_words" , a )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a , **a )
def lowerCAmelCase__ ( self : Optional[Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowercase_ : Optional[Any] = self._tokenizer.model.save(a , name=a )
return tuple(a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , a : bool = False , a : bool = None , a : Optional[List[str]] = None , **a : Optional[Any] , ):
'''simple docstring'''
lowercase_ : Optional[Any] = super().decode(
token_ids=a , skip_special_tokens=a , clean_up_tokenization_spaces=a , **a , )
if truncate_before_pattern is not None and len(a ) > 0:
lowercase_ : Optional[Any] = self.truncate(a , a )
return decoded_text
def lowerCAmelCase__ ( self : Optional[int] , a : str , a : List[Any] ):
'''simple docstring'''
def find_re(a : Tuple , a : List[str] , a : Optional[int] ):
lowercase_ : List[Any] = pattern.search(a , a )
return m.start() if m else -1
lowercase_ : Tuple = [re.compile(a , re.MULTILINE ) for pattern in truncate_before_pattern]
lowercase_ : int = list(re.finditer("^print" , a , re.MULTILINE ) )
if len(a ) > 1:
lowercase_ : Dict = completion[: prints[1].start()]
lowercase_ : Union[str, Any] = list(re.finditer("^def" , a , re.MULTILINE ) )
if len(a ) > 1:
lowercase_ : Optional[Any] = completion[: defs[1].start()]
lowercase_ : Optional[int] = 0
lowercase_ : Union[str, Any] = [
pos for pos in [find_re(a , a , a ) for terminal in terminals] if pos != -1
]
if len(a ) > 0:
return completion[: min(a )]
else:
return completion
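# Usage sketch for the `truncate_before_pattern` hook implemented above (illustrative:
# `generated_ids` is a placeholder for model output, the regex list is only an example,
# and `CodeGenTokenizerFast` is the upstream name of the fast tokenizer defined above):
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tokenizer.decode(
#       generated_ids,
#       skip_special_tokens=True,
#       truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"],
#   )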
| 721 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
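# The `_LazyModule` indirection above defers the heavy imports: loading the package
# only registers the names, and the concrete submodule is imported on first attribute
# access. A minimal sketch of the effect (assumes an installed `transformers` build
# that ships Encodec):
#
#   import transformers                       # submodules stay unloaded
#   model_cls = transformers.EncodecModel     # attribute access triggers the real import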
| 640 | 0 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def lowerCAmelCase_ ( lowercase_ : str , lowercase_ : str ):
'''simple docstring'''
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__SCREAMING_SNAKE_CASE : Dict = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=lowercase_ , output_all_encodings=lowercase_ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , lowercase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__SCREAMING_SNAKE_CASE : Tuple = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(get_home_dir() , '''models''' )
__SCREAMING_SNAKE_CASE : Dict = _load_vocab(lowercase_ , lowercase_ , lowercase_ , cls=lowercase_ )
__SCREAMING_SNAKE_CASE : List[Any] = nlp.model.BERTModel(
lowercase_ , len(lowercase_ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=lowercase_ , use_token_type_embed=lowercase_ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=lowercase_ , use_decoder=lowercase_ , )
original_bort.load_parameters(lowercase_ , cast_dtype=lowercase_ , ignore_extra=lowercase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(lowercase_ ),
}
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_dict(lowercase_ )
__SCREAMING_SNAKE_CASE : Optional[int] = BertForMaskedLM(lowercase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
__SCREAMING_SNAKE_CASE : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
__SCREAMING_SNAKE_CASE : List[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
__SCREAMING_SNAKE_CASE : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__SCREAMING_SNAKE_CASE : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__SCREAMING_SNAKE_CASE : BertSelfAttention = layer.attention.self
__SCREAMING_SNAKE_CASE : str = check_and_map_params(
self_attn.key.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
__SCREAMING_SNAKE_CASE : Optional[int] = check_and_map_params(
self_attn.key.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
__SCREAMING_SNAKE_CASE : int = check_and_map_params(
self_attn.query.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
__SCREAMING_SNAKE_CASE : Optional[int] = check_and_map_params(
self_attn.query.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
__SCREAMING_SNAKE_CASE : int = check_and_map_params(
self_attn.value.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
__SCREAMING_SNAKE_CASE : List[Any] = check_and_map_params(
self_attn.value.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
__SCREAMING_SNAKE_CASE : BertSelfOutput = layer.attention.output
__SCREAMING_SNAKE_CASE : Any = check_and_map_params(
self_output.dense.bias , F'''encoder.transformer_cells.{i}.proj.bias''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = check_and_map_params(
self_output.dense.weight , F'''encoder.transformer_cells.{i}.proj.weight''' )
__SCREAMING_SNAKE_CASE : Any = check_and_map_params(
self_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
__SCREAMING_SNAKE_CASE : Tuple = check_and_map_params(
self_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
__SCREAMING_SNAKE_CASE : BertIntermediate = layer.intermediate
__SCREAMING_SNAKE_CASE : Optional[Any] = check_and_map_params(
intermediate.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = check_and_map_params(
intermediate.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
__SCREAMING_SNAKE_CASE : BertOutput = layer.output
__SCREAMING_SNAKE_CASE : str = check_and_map_params(
bert_output.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
__SCREAMING_SNAKE_CASE : Optional[int] = check_and_map_params(
bert_output.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
__SCREAMING_SNAKE_CASE : Dict = check_and_map_params(
bert_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
__SCREAMING_SNAKE_CASE : List[str] = check_and_map_params(
bert_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__SCREAMING_SNAKE_CASE : Optional[int] = RobertaTokenizer.from_pretrained('''roberta-base''' )
__SCREAMING_SNAKE_CASE : int = tokenizer.encode_plus(lowercase_ )['''input_ids''']
# Get gluon output
__SCREAMING_SNAKE_CASE : Any = mx.nd.array([input_ids] )
__SCREAMING_SNAKE_CASE : Optional[Any] = original_bort(inputs=lowercase_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowercase_ )
__SCREAMING_SNAKE_CASE : str = BertModel.from_pretrained(lowercase_ )
hf_bort_model.eval()
__SCREAMING_SNAKE_CASE : Dict = tokenizer.encode_plus(lowercase_ , return_tensors='''pt''' )
__SCREAMING_SNAKE_CASE : List[Any] = hf_bort_model(**lowercase_ )[0]
__SCREAMING_SNAKE_CASE : str = output_gluon[0].asnumpy()
__SCREAMING_SNAKE_CASE : int = output_hf[0].detach().numpy()
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__SCREAMING_SNAKE_CASE : Tuple = np.allclose(lowercase_ , lowercase_ , atol=1E-3 )
if success:
print('''✔️ Both model do output the same tensors''' )
else:
print('''❌ Both model do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , lowercase_ )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
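# Example invocation (the two flags are defined by the argument parser above; the
# script file name and paths are illustrative):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch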
| 674 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class snake_case ( __UpperCAmelCase ):
lowerCamelCase__ = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCamelCase__ = '''CIDAS/clipseg-rd64-refined'''
lowerCamelCase__ = '''image_segmenter'''
lowerCamelCase__ = CLIPSegForImageSegmentation
lowerCamelCase__ = ['''image''', '''text''']
lowerCamelCase__ = ['''image''']
def __init__( self :Dict , *_lowerCamelCase :Union[str, Any] , **_lowerCamelCase :Tuple ):
requires_backends(self , ['''vision'''] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Tuple , _lowerCamelCase :"Image" , _lowerCamelCase :str ):
return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :Optional[int] ):
with torch.no_grad():
__SCREAMING_SNAKE_CASE : List[Any] = self.model(**_lowerCamelCase ).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :Tuple ):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
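# Usage sketch (illustrative: the image path is a placeholder and the class above is
# `ImageSegmentationTool` in upstream transformers). `PipelineTool` instances are
# callable, chaining the encode -> forward -> decode methods defined above:
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cats.png"), label="cat")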
| 674 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case : Any = logging.get_logger(__name__)
_snake_case : int = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowerCAmelCase ( __UpperCAmelCase ):
a : Tuple = """trajectory_transformer"""
a : Dict = ["""past_key_values"""]
a : Any = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
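    # The mapping above lets generic code read `config.hidden_size`,
    # `config.num_attention_heads` and `config.num_hidden_layers` while the values
    # are stored on the config as `n_embd`, `n_head` and `n_layer` respectively
    # (standard `PretrainedConfig` attribute-map behaviour).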
def __init__( self , UpperCamelCase=100 , UpperCamelCase=5 , UpperCamelCase=1 , UpperCamelCase=1 , UpperCamelCase=249 , UpperCamelCase=6 , UpperCamelCase=17 , UpperCamelCase=25 , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=128 , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=0.00_06 , UpperCamelCase=512 , UpperCamelCase=0.02 , UpperCamelCase=1e-12 , UpperCamelCase=1 , UpperCamelCase=True , UpperCamelCase=1 , UpperCamelCase=50_256 , UpperCamelCase=50_256 , **UpperCamelCase , ):
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = action_weight
_SCREAMING_SNAKE_CASE = reward_weight
_SCREAMING_SNAKE_CASE = value_weight
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = block_size
_SCREAMING_SNAKE_CASE = action_dim
_SCREAMING_SNAKE_CASE = observation_dim
_SCREAMING_SNAKE_CASE = transition_dim
_SCREAMING_SNAKE_CASE = learning_rate
_SCREAMING_SNAKE_CASE = n_layer
_SCREAMING_SNAKE_CASE = n_head
_SCREAMING_SNAKE_CASE = n_embd
_SCREAMING_SNAKE_CASE = embd_pdrop
_SCREAMING_SNAKE_CASE = attn_pdrop
_SCREAMING_SNAKE_CASE = resid_pdrop
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = kaiming_initializer_range
_SCREAMING_SNAKE_CASE = use_cache
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) | 493 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_snake_case : Optional[int] = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
_snake_case : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def _a ( ):
_SCREAMING_SNAKE_CASE = "https://pypi.org/pypi/diffusers/json"
_SCREAMING_SNAKE_CASE = json.loads(request.urlopen(_SCREAMING_SNAKE_CASE ).read() )["releases"].keys()
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : version.Version(_SCREAMING_SNAKE_CASE ) )
def _a ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] ):
init_hf_modules()
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def _a ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
# Imports of the form `import .xxx`
    _SCREAMING_SNAKE_CASE = re.findall(r"^\s*import\s+\.(\S+)\s*$" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE : List[str] ):
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = [module_file]
_SCREAMING_SNAKE_CASE = []
# Let's recurse through all relative imports
while not no_change:
_SCREAMING_SNAKE_CASE = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_SCREAMING_SNAKE_CASE ) )
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ).parent
_SCREAMING_SNAKE_CASE = [str(module_path / m ) for m in new_imports]
_SCREAMING_SNAKE_CASE = [f for f in new_import_files if f not in all_relative_imports]
_SCREAMING_SNAKE_CASE = [F'{f}.py' for f in new_import_files]
_SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE ) == 0
all_relative_imports.extend(_SCREAMING_SNAKE_CASE )
return all_relative_imports
def _a ( _SCREAMING_SNAKE_CASE : str ):
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
# Imports of the form `import xxx`
    _SCREAMING_SNAKE_CASE = re.findall(r"^\s*import\s+(\S+)\s*$" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Only keep the top-level module
_SCREAMING_SNAKE_CASE = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
_SCREAMING_SNAKE_CASE = list(set(_SCREAMING_SNAKE_CASE ) )
_SCREAMING_SNAKE_CASE = []
for imp in imports:
try:
importlib.import_module(_SCREAMING_SNAKE_CASE )
except ImportError:
missing_packages.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
F'{", ".join(_SCREAMING_SNAKE_CASE )}. Run `pip install {" ".join(_SCREAMING_SNAKE_CASE )}`' )
return get_relative_imports(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ):
_SCREAMING_SNAKE_CASE = module_path.replace(os.path.sep , "." )
_SCREAMING_SNAKE_CASE = importlib.import_module(_SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(_SCREAMING_SNAKE_CASE )
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE : List[Any] ):
from ..pipelines import DiffusionPipeline
_SCREAMING_SNAKE_CASE = dict(inspect.getmembers(_SCREAMING_SNAKE_CASE , inspect.isclass ) )
_SCREAMING_SNAKE_CASE = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _SCREAMING_SNAKE_CASE )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
_SCREAMING_SNAKE_CASE = cls
return pipeline_class
def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : bool = False , ):
_SCREAMING_SNAKE_CASE = str(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE = module_file_or_url
_SCREAMING_SNAKE_CASE = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
_SCREAMING_SNAKE_CASE = get_diffusers_versions()
# cut ".dev0"
_SCREAMING_SNAKE_CASE = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
_SCREAMING_SNAKE_CASE = latest_version if latest_version[1:] in available_versions else "main"
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
_SCREAMING_SNAKE_CASE = F'v{revision}'
elif revision == "main":
_SCREAMING_SNAKE_CASE = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
_SCREAMING_SNAKE_CASE = COMMUNITY_PIPELINES_URL.format(revision=_SCREAMING_SNAKE_CASE , pipeline=_SCREAMING_SNAKE_CASE )
try:
_SCREAMING_SNAKE_CASE = cached_download(
_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE = "git"
_SCREAMING_SNAKE_CASE = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
_SCREAMING_SNAKE_CASE = hf_hub_download(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
_SCREAMING_SNAKE_CASE = check_imports(_SCREAMING_SNAKE_CASE )
# Now we move the module inside our cached dynamic modules.
_SCREAMING_SNAKE_CASE = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file )
for module_needed in modules_needed:
_SCREAMING_SNAKE_CASE = F'{module_needed}.py'
shutil.copy(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE = use_auth_token
elif use_auth_token is True:
_SCREAMING_SNAKE_CASE = HfFolder.get_token()
else:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = model_info(_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
_SCREAMING_SNAKE_CASE = submodule_path / commit_hash
_SCREAMING_SNAKE_CASE = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_SCREAMING_SNAKE_CASE )
if not (submodule_path / module_file).exists():
shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_SCREAMING_SNAKE_CASE , F'{module_needed}.py' , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
return os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
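# The function below ties the two helpers together (it is `get_class_from_dynamic_module`
# in upstream diffusers): it caches a remote or local module file under the dynamic-module
# cache and returns the pipeline class that file defines. An illustrative call, with
# placeholder repo and file names:
#
#   pipeline_cls = get_class_from_dynamic_module("some-user/custom-pipeline", "pipeline.py")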
def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : bool = False , **_SCREAMING_SNAKE_CASE : Tuple , ):
_SCREAMING_SNAKE_CASE = get_cached_module_file(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
return get_class_in_module(_SCREAMING_SNAKE_CASE , final_module.replace(".py" , "" ) ) | 493 | 1 |
def solution(numerator = 3 , denominator = 7 , limit = 1_000_000 ) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
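# With the defaults (3/7, limit 1_000_000) this is Project Euler problem 71: the
# largest fraction below 3/7 with denominator <= 1_000_000 is 428570/999997, so the
# printed result is 428570.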
| 33 |
from math import sqrt
def is_prime(number ):
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth = 10001 ):
    """Return the `nth` prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
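# solution() with the default nth=10001 answers Project Euler problem 7:
# the 10001st prime number, 104743.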
| 362 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCAmelCase_ : Union[str, Any] = 50003
UpperCAmelCase_ : str = 50002
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = PLBartTokenizer
__UpperCamelCase = None
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ : Tuple = PLBartTokenizer(lowercase_ , language_codes='''base''' , keep_accents=lowercase_)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PLBartTokenizer(lowercase_ , language_codes='''base''' , keep_accents=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.tokenize('''This is a test''')
self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.convert_tokens_to_ids(lowercase_)
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.convert_ids_to_tokens(lowercase_)
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(lowercase_) for x in range(end - 4 , lowercase_)]
self.assertListEqual(lowercase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''])
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(lowercase_).input_ids
self.assertEqual(
tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_) , lowercase_ , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = PLBartTokenizer(lowercase_ , language_codes='''multi''' , keep_accents=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.tokenize('''This is a test''')
self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.convert_tokens_to_ids(lowercase_)
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.convert_ids_to_tokens(lowercase_)
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = [tokenizer.convert_ids_to_tokens(lowercase_) for x in range(end - 7 , lowercase_)]
self.assertListEqual(
lowercase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''])
SCREAMING_SNAKE_CASE_ : Dict = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_).input_ids
self.assertEqual(
tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_) , lowercase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = "uclanlp/plbart-python-en_XX"
__UpperCamelCase = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
__UpperCamelCase = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
__UpperCamelCase = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''')
SCREAMING_SNAKE_CASE_ : int = 1
return cls
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50003)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
self.assertIn(lowercase_ , self.tokenizer.all_special_ids)
SCREAMING_SNAKE_CASE_ : str = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_)
self.assertEqual(lowercase_ , lowercase_)
self.assertNotIn(self.tokenizer.eos_token , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , lowercase_)
self.assertEqual(len(lowercase_) , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__''']) , [50004, 50001])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : int = PLBartTokenizer.from_pretrained(lowercase_)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase_ , return_tensors='''pt''')
SCREAMING_SNAKE_CASE_ : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , lowercase_)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ : str = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
self.assertIsInstance(lowercase_ , lowercase_)
self.assertEqual((2, 26) , batch.input_ids.shape)
self.assertEqual((2, 26) , batch.attention_mask.shape)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase_)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors='''pt''')
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=10 , return_tensors='''pt''')
SCREAMING_SNAKE_CASE_ : int = targets['''input_ids''']
SCREAMING_SNAKE_CASE_ : List[Any] = shift_tokens_right(lowercase_ , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''')
self.assertEqual(
nested_simplify(lowercase_) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50001,
} , )
| 176 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset example and record its characters-per-token ratio."""
    output = {}
    # truncation is disabled so the char/token ratio reflects the full document (assumed setting)
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 176 | 1 |
import os
def solution() -> int:
    """Sum the name scores of the alphabetically sorted names in p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 513 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort ``numbers`` in place by exchanging out-of-order pairs and return it."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 513 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 560 |
"""simple docstring"""
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :List[str] = None
lowerCAmelCase__ :Optional[int] = graph
self._normalize_graph(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Dict = len(__UpperCAmelCase )
lowerCAmelCase__ :str = None
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if sources is int:
lowerCAmelCase__ :List[str] = [sources]
if sinks is int:
lowerCAmelCase__ :Optional[Any] = [sinks]
if len(__UpperCAmelCase ) == 0 or len(__UpperCAmelCase ) == 0:
return
lowerCAmelCase__ :List[str] = sources[0]
lowerCAmelCase__ :List[str] = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__UpperCAmelCase ) > 1 or len(__UpperCAmelCase ) > 1:
lowerCAmelCase__ :Tuple = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowerCAmelCase__ :List[str] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowerCAmelCase__ :Any = max_input_flow
lowerCAmelCase__ :Optional[Any] = 0
lowerCAmelCase__ :Optional[int] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowerCAmelCase__ :Optional[int] = max_input_flow
lowerCAmelCase__ :Tuple = size - 1
def snake_case ( self ):
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = algorithm(self )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = flow_network
lowerCAmelCase__ :List[Any] = flow_network.verticesCount
lowerCAmelCase__ :Optional[Any] = flow_network.sourceIndex
lowerCAmelCase__ :Tuple = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowerCAmelCase__ :Optional[int] = flow_network.graph
lowerCAmelCase__ :List[str] = False
def snake_case ( self ):
'''simple docstring'''
if not self.executed:
self._algorithm()
lowerCAmelCase__ :List[Any] = True
def snake_case ( self ):
'''simple docstring'''
pass
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
# use this to save your result
lowerCAmelCase__ :Dict = -1
def snake_case ( self ):
'''simple docstring'''
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowerCAmelCase__ :int = [0] * self.verticies_count
lowerCAmelCase__ :str = [0] * self.verticies_count
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowerCAmelCase__ :str = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowerCAmelCase__ :int = 0
while i < len(__UpperCAmelCase ):
lowerCAmelCase__ :Tuple = vertices_list[i]
lowerCAmelCase__ :List[Any] = self.heights[vertex_index]
self.process_vertex(__UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__UpperCAmelCase ) )
lowerCAmelCase__ :int = 0
else:
i += 1
lowerCAmelCase__ :Tuple = sum(self.preflow[self.source_index] )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
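        # Discharge: push excess flow to admissible (lower) neighbours; relabel when no push is possible.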
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__UpperCAmelCase , __UpperCAmelCase )
self.relabel(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
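        # Push at most min(excess at the source vertex, remaining residual capacity of the edge).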
lowerCAmelCase__ :Dict = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
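        # Relabel: raise this vertex just above its lowest neighbour that still has residual capacity.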
lowerCAmelCase__ :Tuple = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowerCAmelCase__ :Any = self.heights[to_index]
if min_height is not None:
lowerCAmelCase__ :Any = min_height + 1
if __name__ == "__main__":
__A = [0]
__A = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__A = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__A = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__A = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 560 | 1 |
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Dict ='\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : str ='\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : Optional[Any] ='\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Dict ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :str , lowerCAmelCase__ :str=None , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :Optional[int]=False , ) -> Optional[int]:
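        # Normalise predictions and references according to the flags, then report the percentage of exact string matches.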
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__SCREAMING_SNAKE_CASE : Tuple = np.array([re.sub(lowerCAmelCase__ , '''''' , lowerCAmelCase__ ) for x in predictions] )
__SCREAMING_SNAKE_CASE : Dict = np.array([re.sub(lowerCAmelCase__ , '''''' , lowerCAmelCase__ ) for x in references] )
else:
__SCREAMING_SNAKE_CASE : Any = np.asarray(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = np.asarray(lowerCAmelCase__ )
if ignore_case:
__SCREAMING_SNAKE_CASE : Any = np.char.lower(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.char.lower(lowerCAmelCase__ )
if ignore_punctuation:
__SCREAMING_SNAKE_CASE : str = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
__SCREAMING_SNAKE_CASE : List[str] = np.char.translate(lowerCAmelCase__ , table=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = np.char.translate(lowerCAmelCase__ , table=lowerCAmelCase__ )
if ignore_numbers:
__SCREAMING_SNAKE_CASE : Union[str, Any] = string.digits.maketrans('''''' , '''''' , string.digits )
__SCREAMING_SNAKE_CASE : str = np.char.translate(lowerCAmelCase__ , table=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = np.char.translate(lowerCAmelCase__ , table=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = predictions == references
return {"exact_match": np.mean(lowerCAmelCase__ ) * 100}
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 696 | 1 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 48 |
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
| 48 | 1 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return a sorted list of the most frequently occurring value(s) in ``input_list``."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 583 | 0 |
from math import pow, sqrt
def _SCREAMING_SNAKE_CASE ( *_lowerCamelCase : float) -> bool:
'''simple docstring'''
__UpperCamelCase : Tuple = len(_lowerCamelCase) > 0 and all(value > 0.0 for value in values)
return result
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : float , _lowerCamelCase : float) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a) , 6)
if validate(_lowerCamelCase , _lowerCamelCase)
else ValueError("Input Error: Molar mass values must greater than 0.")
)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a) , 6)
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a) , 6)
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2) , 6)
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2) / molar_mass , 6)
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
) | 94 |
import re
import string
import numpy as np
import datasets
lowercase : List[str] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
lowercase : List[str] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
'''simple docstring'''
def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def _lowerCamelCase ( self :int , a :Optional[Any] , a :Dict , a :Optional[int]=None , a :int=False , a :Tuple=False , a :Optional[int]=False , ) -> Any:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__UpperCamelCase : List[Any] = np.array([re.sub(a , "" , a ) for x in predictions] )
__UpperCamelCase : Optional[Any] = np.array([re.sub(a , "" , a ) for x in references] )
else:
__UpperCamelCase : Optional[int] = np.asarray(a )
__UpperCamelCase : List[str] = np.asarray(a )
if ignore_case:
__UpperCamelCase : Optional[int] = np.char.lower(a )
__UpperCamelCase : str = np.char.lower(a )
if ignore_punctuation:
__UpperCamelCase : Tuple = string.punctuation.maketrans("" , "" , string.punctuation )
__UpperCamelCase : int = np.char.translate(a , table=a )
__UpperCamelCase : str = np.char.translate(a , table=a )
if ignore_numbers:
__UpperCamelCase : List[str] = string.digits.maketrans("" , "" , string.digits )
__UpperCamelCase : Tuple = np.char.translate(a , table=a )
__UpperCamelCase : Union[str, Any] = np.char.translate(a , table=a )
__UpperCamelCase : List[Any] = predictions == references
return {"exact_match": np.mean(a ) * 1_0_0} | 94 | 1 |
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if ``item`` occurs in the sorted list ``a_list``."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(F'''{target} was {not_str}found in {sequence}''') | 433 |
a_ = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset([])
a_ = frozenset(['image'])
a_ = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image'])
a_ = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'negative_prompt'])
a_ = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
a_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
a_ = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['image', 'mask_image'])
a_ = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
a_ = frozenset(['example_image', 'image', 'mask_image'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['class_labels'])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(['batch_size'])
a_ = frozenset([])
a_ = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
a_ = frozenset(['prompt', 'negative_prompt'])
a_ = frozenset(['input_tokens'])
a_ = frozenset(['input_tokens']) | 25 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A: Optional[Any] = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 'albert'
def __init__( self , _lowercase=3_0000 , _lowercase=128 , _lowercase=4096 , _lowercase=12 , _lowercase=1 , _lowercase=64 , _lowercase=1_6384 , _lowercase=1 , _lowercase="gelu_new" , _lowercase=0 , _lowercase=0 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=0.1 , _lowercase="absolute" , _lowercase=0 , _lowercase=2 , _lowercase=3 , **_lowercase , ) -> Any:
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
lowercase_ : Dict = vocab_size
lowercase_ : List[str] = embedding_size
lowercase_ : Any = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Tuple = num_hidden_groups
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : int = inner_group_num
lowercase_ : Any = hidden_act
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : Tuple = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : Dict = max_position_embeddings
lowercase_ : List[Any] = type_vocab_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Union[str, Any] = layer_norm_eps
lowercase_ : List[str] = classifier_dropout_prob
lowercase_ : Optional[int] = position_embedding_type
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase_ : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowercase_ : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ):
lowerCamelCase_ = 'maskformer-swin'
lowerCamelCase_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : int , UpperCAmelCase__ : Tuple=224 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Union[str, Any]=96 , UpperCAmelCase__ : int=[2, 2, 6, 2] , UpperCAmelCase__ : str=[3, 6, 12, 24] , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : List[str]=4.0 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : int=1E-5 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Any =image_size
lowercase : Tuple =patch_size
lowercase : Any =num_channels
lowercase : Optional[int] =embed_dim
lowercase : str =depths
lowercase : Any =len(UpperCAmelCase__ )
lowercase : str =num_heads
lowercase : Dict =window_size
lowercase : List[str] =mlp_ratio
lowercase : Union[str, Any] =qkv_bias
lowercase : Optional[Any] =hidden_dropout_prob
lowercase : int =attention_probs_dropout_prob
lowercase : Optional[Any] =drop_path_rate
lowercase : Tuple =hidden_act
lowercase : List[Any] =use_absolute_embeddings
lowercase : Any =layer_norm_eps
lowercase : List[Any] =initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase : Any =int(embed_dim * 2 ** (len(UpperCAmelCase__ ) - 1) )
lowercase : Union[str, Any] =['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(UpperCAmelCase__ ) + 1 )]
lowercase , lowercase : Optional[int] =get_aligned_output_features_output_indices(
out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
| 92 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __A ( lowerCAmelCase_ ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"could not parse string as bool {string}" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 414 | 0 |
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 42 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    r"""Configuration class shared by all REALM models (embedder, encoder, scorer, reader and open-QA)."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, retriever_proj_size=128,
        num_hidden_layers=12, num_attention_heads=12, num_candidates=8,
        intermediate_size=3072, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12,
        span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3,
        reader_beam_size=5, reader_seq_len=320,
        num_block_records=13353718, searcher_beam_size=5000,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 42 | 1 |
'''simple docstring'''
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """
    Convert a speed value between km/h, m/s, mph and knot.

    >>> convert_speed(100, "km/h", "m/s")
    27.778
    >>> convert_speed(100, "km/h", "mph")
    62.137
    """
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class A__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def A ( self : str , _a : str , _a : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(_a , {'sequence': ANY(_a ), 'labels': [ANY(_a )], 'scores': [ANY(_a )]} )
# No kwarg
_SCREAMING_SNAKE_CASE =classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(_a , {'sequence': ANY(_a ), 'labels': [ANY(_a )], 'scores': [ANY(_a )]} )
_SCREAMING_SNAKE_CASE =classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(_a , {'sequence': ANY(_a ), 'labels': [ANY(_a )], 'scores': [ANY(_a )]} )
_SCREAMING_SNAKE_CASE =classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
_a , {'sequence': ANY(_a ), 'labels': [ANY(_a ), ANY(_a )], 'scores': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_SCREAMING_SNAKE_CASE =classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
_a , {'sequence': ANY(_a ), 'labels': [ANY(_a ), ANY(_a )], 'scores': [ANY(_a ), ANY(_a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_SCREAMING_SNAKE_CASE =classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(_a , {'sequence': ANY(_a ), 'labels': [ANY(_a )], 'scores': [ANY(_a )]} )
# https://github.com/huggingface/transformers/issues/13846
_SCREAMING_SNAKE_CASE =classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
_a , [
{'sequence': ANY(_a ), 'labels': [ANY(_a ), ANY(_a )], 'scores': [ANY(_a ), ANY(_a )]}
for i in range(1 )
] , )
_SCREAMING_SNAKE_CASE =classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
_a , [
{'sequence': ANY(_a ), 'labels': [ANY(_a ), ANY(_a )], 'scores': [ANY(_a ), ANY(_a )]}
for i in range(2 )
] , )
with self.assertRaises(_a ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(_a ):
classifier(_a , candidate_labels='politics' )
with self.assertRaises(_a ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(_a ):
classifier('Who are you voting for in 2020?' , candidate_labels=_a )
with self.assertRaises(_a ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(_a ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=_a , )
self.run_entailment_id(_a )
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
def A ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def A ( self : int ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
_SCREAMING_SNAKE_CASE =zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_a ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
def A ( self : Dict ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
_SCREAMING_SNAKE_CASE =zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_a ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
def A ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
_SCREAMING_SNAKE_CASE =zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_a ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_SCREAMING_SNAKE_CASE =zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
_SCREAMING_SNAKE_CASE =zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_a ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_SCREAMING_SNAKE_CASE =zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_a , )
self.assertEqual(
nested_simplify(_a ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 405 | 0 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 363 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 363 | 1 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
def _lowerCAmelCase ( self , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
        config = {
'''num_train_timesteps''': 1_100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**UpperCamelCase__ )
return config
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ = sample.to(UpperCamelCase__ )
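        # Run the full denoising loop and check summary statistics of the final sample below.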
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = model(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = model(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 0.0_002 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_ = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
lowerCamelCase_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = model(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCamelCase__ , use_karras_sigmas=UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_ = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
lowerCamelCase_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = model(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
lowerCamelCase_ = output.prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
        assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
| 142 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a tokenizer into a single
    processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 142 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
a = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , ):
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
_A = (
F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
_A = dict(scheduler.config )
_A = 1
_A = FrozenDict(UpperCamelCase__ )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
_A = (
F'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
_A = dict(scheduler.config )
_A = True
_A = FrozenDict(UpperCamelCase__ )
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=UpperCamelCase__ , segmentation_processor=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def lowerCAmelCase_ ( self : int ):
self.enable_attention_slicing(UpperCamelCase__ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_A = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self : str ):
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str = 512 , _UpperCAmelCase : Any = 512 , _UpperCAmelCase : Optional[int] = 50 , _UpperCAmelCase : int = 7.5 , _UpperCAmelCase : Union[str, Any] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : Dict = 0.0 , _UpperCAmelCase : str = None , _UpperCAmelCase : Any = None , _UpperCAmelCase : List[Any] = "pil" , _UpperCAmelCase : Tuple = True , _UpperCAmelCase : Union[str, Any] = None , _UpperCAmelCase : Union[str, Any] = 1 , **_UpperCAmelCase : int , ):
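        # First use CLIPSeg to predict a segmentation mask for the region described by `text`,
        # then hand the image and the generated mask to a Stable Diffusion inpainting pipeline.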
_A = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
_A = self.segmentation_model(**UpperCamelCase__ )
_A = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
_A = self.numpy_to_pil(UpperCamelCase__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
_A = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
            prompt=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , )
| 700 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12,
        image_size=224, patch_size=16, num_channels=3,
        qkv_bias=True, encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 505 | 0 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence`` in place between indices ``start`` and ``end`` (inclusive) using slowsort."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 475 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12,
        image_size=224, patch_size=16, num_channels=3,
        qkv_bias=True, encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 475 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
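# Build the import structure lazily: symbols for each optional backend (sentencepiece, tokenizers,
# torch, TensorFlow, Flax) are only registered below when that backend is actually installed.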
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings, as used by byte-level BPE."""
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (represented as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED (adapted from the BART/GPT-2 tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
'''simple docstring'''
lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase : str =json.load(UpperCAmelCase )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowercase : Optional[int] =errors # how to handle errors in decoding
lowercase : Tuple =bytes_to_unicode()
lowercase : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[int] ={}
lowercase : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : List[str] =get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
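            # Greedily merge the lowest-ranked (most frequent) adjacent pair until no learned merge applies.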
lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Optional[int] =bigram
lowercase : Union[str, Any] =[]
lowercase : Optional[Any] =0
while i < len(UpperCAmelCase ):
try:
lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Optional[int] =j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : str =new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
lowercase : Union[str, Any] =word
return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 8 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
A : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
A : Union[str, Any] = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E image-to-3D pipeline: rendered views of each generated 3D asset."""

    images: Union[PIL.Image.Image, np.ndarray]
class A (_lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , __lowerCAmelCase : PriorTransformer , __lowerCAmelCase : CLIPVisionModel , __lowerCAmelCase : CLIPImageProcessor , __lowerCAmelCase : HeunDiscreteScheduler , __lowerCAmelCase : ShapERenderer , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def a_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ) -> Dict:
"""simple docstring"""
if latents is None:
A__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
A__ = latents.to(UpperCamelCase_ )
A__ = latents * scheduler.init_noise_sigma
return latents
def a_ ( self : Optional[Any] , __lowerCAmelCase : List[str]=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A__ = torch.device(f'cuda:{gpu_id}' )
A__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def a_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def a_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , ) -> int:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
A__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
A__ = self.image_processor(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
A__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
A__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
A__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
A__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 25 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : int = 64 , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ) -> Optional[Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
A__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
A__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
A__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
A__ = self._execution_device
A__ = batch_size * num_images_per_prompt
A__ = guidance_scale > 1.0
A__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
A__ = self.scheduler.timesteps
A__ = self.prior.config.num_embeddings
A__ = self.prior.config.embedding_dim
A__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
A__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
A__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
A__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
A__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
      if do_classifier_free_guidance:
A__ = noise_pred.chunk(2 )
A__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
A__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
A__ = []
for i, latent in enumerate(UpperCamelCase_ ):
print()
A__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(UpperCamelCase_ )
A__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
A__ = images.cpu().numpy()
if output_type == "pil":
A__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 176 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( _lowerCAmelCase ):
def __init__( self : int , UpperCamelCase_ : CLIPSegForImageSegmentation , UpperCamelCase_ : CLIPSegProcessor , UpperCamelCase_ : AutoencoderKL , UpperCamelCase_ : CLIPTextModel , UpperCamelCase_ : CLIPTokenizer , UpperCamelCase_ : UNetaDConditionModel , UpperCamelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase_ : StableDiffusionSafetyChecker , UpperCamelCase_ : CLIPImageProcessor , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ : int = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
            '''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = dict(scheduler.config )
lowerCamelCase_ : Optional[Any] = 1
lowerCamelCase_ : List[Any] = FrozenDict(UpperCamelCase_ )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ : Any = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
lowerCamelCase_ : Dict = dict(scheduler.config )
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : Any = FrozenDict(UpperCamelCase_ )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                ''' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=UpperCamelCase_ , segmentation_processor=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , )
def __UpperCamelCase ( self : str , UpperCamelCase_ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase_ )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self.enable_attention_slicing(UpperCamelCase_ )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowerCamelCase_ : List[str] = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCamelCase_ : str , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 50 , UpperCamelCase_ : float = 7.5 , UpperCamelCase_ : Optional[Union[str, List[str]]] = None , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[torch.Generator] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_ : int = 1 , **UpperCamelCase_ : Dict , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
lowerCamelCase_ : Union[str, Any] = self.segmentation_model(**UpperCamelCase_ )
lowerCamelCase_ : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ : int = self.numpy_to_pil(UpperCamelCase_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ : List[str] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , height=UpperCamelCase_ , width=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , negative_prompt=UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ , eta=UpperCamelCase_ , generator=UpperCamelCase_ , latents=UpperCamelCase_ , output_type=UpperCamelCase_ , return_dict=UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=UpperCamelCase_ , )
| 501 | 0 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__SCREAMING_SNAKE_CASE =True
except ImportError:
__SCREAMING_SNAKE_CASE =False
try:
from torch.hub import _get_torch_home
__SCREAMING_SNAKE_CASE =_get_torch_home()
except ImportError:
__SCREAMING_SNAKE_CASE =os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
__SCREAMING_SNAKE_CASE =os.path.join(torch_cache_home, "transformers")
__SCREAMING_SNAKE_CASE ="https://cdn.huggingface.co"
__SCREAMING_SNAKE_CASE ="https://s3.amazonaws.com/models.huggingface.co/bert"
__SCREAMING_SNAKE_CASE ="/".join(str(Path(__file__).resolve()).split("/")[:-1])
__SCREAMING_SNAKE_CASE =os.path.join(PATH, "config.yaml")
__SCREAMING_SNAKE_CASE =os.path.join(PATH, "attributes.txt")
__SCREAMING_SNAKE_CASE =os.path.join(PATH, "objects.txt")
__SCREAMING_SNAKE_CASE =os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
__SCREAMING_SNAKE_CASE =os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
__SCREAMING_SNAKE_CASE =os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
__SCREAMING_SNAKE_CASE ="pytorch_model.bin"
__SCREAMING_SNAKE_CASE ="config.yaml"
def lowercase__( __SCREAMING_SNAKE_CASE : Any=OBJECTS , __SCREAMING_SNAKE_CASE : Any=ATTRIBUTES ):
lowercase_ : List[Any] = []
with open(__SCREAMING_SNAKE_CASE ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
lowercase_ : Any = []
with open(__SCREAMING_SNAKE_CASE ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
lowercase_ : Dict = OrderedDict()
with open(__SCREAMING_SNAKE_CASE , 'rb' ) as f:
lowercase_ : str = pkl.load(__SCREAMING_SNAKE_CASE )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowercase_ : Union[str, Any] = ckp.pop(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
lowercase_ : int = torch.tensor(__SCREAMING_SNAKE_CASE )
else:
      assert isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ), type(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = v
return r
class UpperCamelCase :
lowercase = {}
def __init__( self ,__UpperCamelCase ,__UpperCamelCase = "root" ,__UpperCamelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[str] = name
lowercase_ : Union[str, Any] = level
lowercase_ : List[Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowercase_ : Optional[int] = copy.deepcopy(__UpperCamelCase )
lowercase_ : int = copy.deepcopy(__UpperCamelCase )
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Union[str, Any] = Config(__UpperCamelCase ,name=__UpperCamelCase ,level=level + 1 )
lowercase_ : Dict = v
setattr(self ,__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = d
def __repr__( self ) -> List[Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
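    # Support dotted keys (e.g. "model.attr") by walking the nested pointer dictionaries level by level.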
lowercase_ : Any = val
lowercase_ : Optional[Any] = val
lowercase_ : Optional[int] = key.split('.' )
lowercase_ : Dict = len(__UpperCamelCase ) - 1
lowercase_ : List[Any] = self._pointer
if len(__UpperCamelCase ) > 1:
for i, l in enumerate(__UpperCamelCase ):
if hasattr(self ,__UpperCamelCase ) and isinstance(getattr(self ,__UpperCamelCase ) ,__UpperCamelCase ):
setattr(getattr(self ,__UpperCamelCase ) ,'.'.join(levels[i:] ) ,__UpperCamelCase )
if l == last_level:
lowercase_ : Any = val
else:
lowercase_ : Any = pointer[l]
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return self._pointer
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
with open(f'''{file_name}''' ,'w' ) as stream:
dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
with open(f'''{file_name}''' ,'w' ) as stream:
json.dump(__UpperCamelCase ,__UpperCamelCase )
@staticmethod
def _UpperCAmelCase ( __UpperCamelCase ) -> Tuple:
'''simple docstring'''
with open(__UpperCamelCase ) as stream:
lowercase_ : Union[str, Any] = load(__UpperCamelCase ,Loader=__UpperCamelCase )
return data
def __str__( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[str] = ' '
if self._name != "root":
lowercase_ : List[Any] = f'''{t * (self._level-1)}{self._name}:\n'''
else:
lowercase_ : List[str] = ''
lowercase_ : Tuple = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(__UpperCamelCase ).__name__})\n'''
lowercase_ : Any = level
return r[:-1]
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : str = cls.get_config_dict(__UpperCamelCase ,**__UpperCamelCase )
return cls(__UpperCamelCase )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = kwargs.pop('cache_dir' ,__UpperCamelCase )
lowercase_ : Optional[Any] = kwargs.pop('force_download' ,__UpperCamelCase )
lowercase_ : Tuple = kwargs.pop('resume_download' ,__UpperCamelCase )
lowercase_ : Tuple = kwargs.pop('proxies' ,__UpperCamelCase )
lowercase_ : Dict = kwargs.pop('local_files_only' ,__UpperCamelCase )
if os.path.isdir(__UpperCamelCase ):
lowercase_ : List[str] = os.path.join(__UpperCamelCase ,__UpperCamelCase )
elif os.path.isfile(__UpperCamelCase ) or is_remote_url(__UpperCamelCase ):
lowercase_ : int = pretrained_model_name_or_path
else:
lowercase_ : Tuple = hf_bucket_url(__UpperCamelCase ,filename=__UpperCamelCase ,use_cdn=__UpperCamelCase )
try:
# Load from URL or cache if already cached
lowercase_ : Optional[int] = cached_path(
__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,)
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowercase_ : str = Config.load_yaml(__UpperCamelCase )
except EnvironmentError:
lowercase_ : Optional[int] = 'Can\'t load config for'
raise EnvironmentError(__UpperCamelCase )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(__UpperCamelCase ), kwargs
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : Any = torch.load('dump.pt' , map_location=in_tensor.device )
lowercase_ : Any = in_tensor.numpy()
lowercase_ : Tuple = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=0.01 , atol=0.1 ), (
    F'''{sum([1 for x in np.isclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=0.01 , atol=0.1 ).flatten() if not x] )/len(na.flatten() )*1_00:.4f} %'''
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
lowercase_ : Dict = urlparse(__SCREAMING_SNAKE_CASE )
return parsed.scheme in ("http", "https")
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=True ):
lowercase_ : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowercase_ : Optional[int] = '/' not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , __SCREAMING_SNAKE_CASE : List[Any]=None , ):
lowercase_ : int = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
ua += "; " + "; ".join('{}/{}'.format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for k, v in user_agent.items() )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
ua += "; " + user_agent
lowercase_ : Optional[int] = {'user-agent': ua}
if resume_size > 0:
lowercase_ : Dict = 'bytes=%d-' % (resume_size,)
lowercase_ : Optional[int] = requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE )
if response.status_code == 4_16: # Range not satisfiable
return
lowercase_ : Union[str, Any] = response.headers.get('Content-Length' )
lowercase_ : Optional[int] = resume_size + int(__SCREAMING_SNAKE_CASE ) if content_length is not None else None
lowercase_ : List[str] = tqdm(
unit='B' , unit_scale=__SCREAMING_SNAKE_CASE , total=__SCREAMING_SNAKE_CASE , initial=__SCREAMING_SNAKE_CASE , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__SCREAMING_SNAKE_CASE ) )
temp_file.write(__SCREAMING_SNAKE_CASE )
progress.close()
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : int=10 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Any=False , ):
if cache_dir is None:
lowercase_ : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = str(__SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = None
if not local_files_only:
try:
lowercase_ : Union[str, Any] = requests.head(__SCREAMING_SNAKE_CASE , allow_redirects=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE )
if response.status_code == 2_00:
lowercase_ : int = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowercase_ : int = url_to_filename(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# get cache path to put the file
lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__SCREAMING_SNAKE_CASE ):
return cache_path
else:
lowercase_ : str = [
file
for file in fnmatch.filter(os.listdir(__SCREAMING_SNAKE_CASE ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(__SCREAMING_SNAKE_CASE ) > 0:
return os.path.join(__SCREAMING_SNAKE_CASE , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowercase_ : Tuple = cache_path + '.lock'
with FileLock(__SCREAMING_SNAKE_CASE ):
# If the download just completed while the lock was activated.
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowercase_ : Dict = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(__SCREAMING_SNAKE_CASE , 'a+b' ) as f:
yield f
lowercase_ : str = _resumable_file_manager
if os.path.exists(__SCREAMING_SNAKE_CASE ):
lowercase_ : str = os.stat(__SCREAMING_SNAKE_CASE ).st_size
else:
lowercase_ : Dict = 0
else:
lowercase_ : Tuple = partial(tempfile.NamedTemporaryFile , dir=__SCREAMING_SNAKE_CASE , delete=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , __SCREAMING_SNAKE_CASE , temp_file.name , )
http_get(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_size=__SCREAMING_SNAKE_CASE , user_agent=__SCREAMING_SNAKE_CASE , )
os.replace(temp_file.name , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = {'url': url, 'etag': etag}
lowercase_ : str = cache_path + '.json'
with open(__SCREAMING_SNAKE_CASE , 'w' ) as meta_file:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return cache_path
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None ):
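  # Derive a deterministic cache filename by hashing the URL (and appending a hash of the ETag, when given).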
lowercase_ : Dict = url.encode('utf-8' )
lowercase_ : Optional[Any] = shaaaa(__SCREAMING_SNAKE_CASE )
lowercase_ : int = url_hash.hexdigest()
if etag:
lowercase_ : Any = etag.encode('utf-8' )
lowercase_ : Tuple = shaaaa(__SCREAMING_SNAKE_CASE )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=False , ):
if cache_dir is None:
lowercase_ : int = TRANSFORMERS_CACHE
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Any = str(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = str(__SCREAMING_SNAKE_CASE )
if is_remote_url(__SCREAMING_SNAKE_CASE ):
# URL, so get it from the cache (downloading if necessary)
lowercase_ : int = get_from_cache(
__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , user_agent=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
elif os.path.exists(__SCREAMING_SNAKE_CASE ):
# File, and it exists.
lowercase_ : Optional[Any] = url_or_filename
elif urlparse(__SCREAMING_SNAKE_CASE ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(__SCREAMING_SNAKE_CASE ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(__SCREAMING_SNAKE_CASE ) )
if extract_compressed_file:
if not is_zipfile(__SCREAMING_SNAKE_CASE ) and not tarfile.is_tarfile(__SCREAMING_SNAKE_CASE ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowercase_ : Tuple = os.path.split(__SCREAMING_SNAKE_CASE )
lowercase_ : str = output_file.replace('.' , '-' ) + '-extracted'
lowercase_ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isdir(__SCREAMING_SNAKE_CASE ) and os.listdir(__SCREAMING_SNAKE_CASE ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowercase_ : Any = output_path + '.lock'
with FileLock(__SCREAMING_SNAKE_CASE ):
shutil.rmtree(__SCREAMING_SNAKE_CASE , ignore_errors=__SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE )
if is_zipfile(__SCREAMING_SNAKE_CASE ):
with ZipFile(__SCREAMING_SNAKE_CASE , 'r' ) as zip_file:
zip_file.extractall(__SCREAMING_SNAKE_CASE )
zip_file.close()
elif tarfile.is_tarfile(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = tarfile.open(__SCREAMING_SNAKE_CASE )
tar_file.extractall(__SCREAMING_SNAKE_CASE )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(__SCREAMING_SNAKE_CASE ) )
return output_path_extracted
return output_path
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]="," ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE ) as f:
lowercase_ : int = eval(f.read() )
else:
lowercase_ : Dict = requests.get(__SCREAMING_SNAKE_CASE )
try:
      lowercase_ : Dict = req.json()
except Exception:
lowercase_ : str = req.content.decode()
assert data is not None, "could not connect"
try:
lowercase_ : List[Any] = eval(__SCREAMING_SNAKE_CASE )
except Exception:
lowercase_ : str = data.split('\n' )
req.close()
return data
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : List[str] = requests.get(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase_ : List[Any] = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , 'rb' ) as stream:
lowercase_ : Dict = pkl.load(__SCREAMING_SNAKE_CASE )
lowercase_ : int = weights.pop('model' )
lowercase_ : Tuple = {}
for k, v in model.items():
lowercase_ : str = torch.from_numpy(__SCREAMING_SNAKE_CASE )
if "running_var" in k:
lowercase_ : Union[str, Any] = torch.tensor([0] )
lowercase_ : Optional[int] = k.replace('running_var' , 'num_batches_tracked' )
lowercase_ : List[Any] = zero
return new
def lowercase__( ):
print(F'''{os.path.abspath(os.path.join(__SCREAMING_SNAKE_CASE , os.pardir ) )}/demo.ipynb''' )
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any="RGB" ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = cva.imread(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Tuple = get_image_from_url(__SCREAMING_SNAKE_CASE )
assert img is not None, F'''could not connect to: {im}'''
lowercase_ : Optional[Any] = cva.cvtColor(__SCREAMING_SNAKE_CASE , cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowercase_ : Any = img[:, :, ::-1]
return img
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]=1 ):
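  # Lazily yield successive slices of `images`, each containing up to `batch` items.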
return (images[i : i + batch] for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ))
| 701 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCamelCase ( lowercase_ ):
lowercase = 'wav2vec2'
def __init__( self ,__UpperCamelCase=32 ,__UpperCamelCase=768 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-5 ,__UpperCamelCase="group" ,__UpperCamelCase="gelu" ,__UpperCamelCase=(512, 512, 512, 512, 512, 512, 512) ,__UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) ,__UpperCamelCase=(10, 3, 3, 3, 3, 2, 2) ,__UpperCamelCase=False ,__UpperCamelCase=128 ,__UpperCamelCase=16 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=0.05 ,__UpperCamelCase=10 ,__UpperCamelCase=2 ,__UpperCamelCase=0.0 ,__UpperCamelCase=10 ,__UpperCamelCase=0 ,__UpperCamelCase=320 ,__UpperCamelCase=2 ,__UpperCamelCase=0.1 ,__UpperCamelCase=100 ,__UpperCamelCase=256 ,__UpperCamelCase=256 ,__UpperCamelCase=0.1 ,__UpperCamelCase="sum" ,__UpperCamelCase=False ,__UpperCamelCase=False ,__UpperCamelCase=256 ,__UpperCamelCase=(512, 512, 512, 512, 1500) ,__UpperCamelCase=(5, 3, 3, 1, 1) ,__UpperCamelCase=(1, 2, 3, 1, 1) ,__UpperCamelCase=512 ,__UpperCamelCase=0 ,__UpperCamelCase=1 ,__UpperCamelCase=2 ,__UpperCamelCase=False ,__UpperCamelCase=3 ,__UpperCamelCase=2 ,__UpperCamelCase=3 ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(**__UpperCamelCase ,pad_token_id=__UpperCamelCase ,bos_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase )
lowercase_ : Optional[Any] = hidden_size
lowercase_ : Tuple = feat_extract_norm
lowercase_ : Dict = feat_extract_activation
lowercase_ : List[str] = list(__UpperCamelCase )
lowercase_ : str = list(__UpperCamelCase )
lowercase_ : Dict = list(__UpperCamelCase )
lowercase_ : Optional[Any] = conv_bias
lowercase_ : Dict = num_conv_pos_embeddings
lowercase_ : List[str] = num_conv_pos_embedding_groups
lowercase_ : Optional[Any] = len(self.conv_dim )
lowercase_ : Any = num_hidden_layers
lowercase_ : List[Any] = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : int = hidden_dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Union[str, Any] = activation_dropout
lowercase_ : Tuple = feat_proj_dropout
lowercase_ : List[str] = final_dropout
lowercase_ : Union[str, Any] = layerdrop
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = vocab_size
lowercase_ : Optional[int] = do_stable_layer_norm
lowercase_ : Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase_ : Dict = apply_spec_augment
lowercase_ : Optional[int] = mask_time_prob
lowercase_ : Union[str, Any] = mask_time_length
lowercase_ : List[str] = mask_time_min_masks
lowercase_ : List[str] = mask_feature_prob
lowercase_ : Any = mask_feature_length
lowercase_ : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase_ : List[Any] = num_codevectors_per_group
lowercase_ : Optional[int] = num_codevector_groups
lowercase_ : Dict = contrastive_logits_temperature
lowercase_ : int = feat_quantizer_dropout
lowercase_ : Optional[int] = num_negatives
lowercase_ : str = codevector_dim
lowercase_ : str = proj_codevector_dim
lowercase_ : Optional[Any] = diversity_loss_weight
# ctc loss
lowercase_ : Tuple = ctc_loss_reduction
lowercase_ : int = ctc_zero_infinity
# adapter
lowercase_ : int = add_adapter
lowercase_ : Dict = adapter_kernel_size
lowercase_ : List[str] = adapter_stride
lowercase_ : Dict = num_adapter_layers
lowercase_ : Dict = output_hidden_size or hidden_size
lowercase_ : Optional[Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase_ : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase_ : Any = list(__UpperCamelCase )
lowercase_ : str = list(__UpperCamelCase )
lowercase_ : Any = list(__UpperCamelCase )
lowercase_ : Tuple = xvector_output_dim
@property
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
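    # Overall downsampling factor of the convolutional feature encoder (product of all conv strides).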
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 477 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def a__ ( lowercase : Dict=None ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser(add_help=lowercase, allow_abbrev=lowercase )
# The main config parser
_UpperCamelCase = config_command_parser(lowercase )
# The subparser to add commands to
_UpperCamelCase = config_parser.add_subparsers(title='''subcommands''', dest='''subcommand''' )
# Then add other parsers with the parent parser
default_command_parser(lowercase, parents=[parent_parser] )
update_command_parser(lowercase, parents=[parent_parser] )
return config_parser
def a__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase = get_config_parser()
_UpperCamelCase = config_parser.parse_args()
if not hasattr(lowercase, '''func''' ):
config_parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
| 98 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
SCREAMING_SNAKE_CASE_ = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
SCREAMING_SNAKE_CASE_ = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def A__ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def A__ ( self , snake_case_ , snake_case_ , snake_case_=False ) -> Dict:
if return_pvalue:
__lowerCAmelCase = pearsonr(snake_case_ , snake_case_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(snake_case_ , snake_case_ )[0] )}
| 465 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = ''''''
A : Any = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
A : Any = None # compression type in fsspec. ex: "gzip"
A : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self, A = "", A = None, A = None, **A ):
'''simple docstring'''
super().__init__(self, **__UpperCamelCase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
SCREAMING_SNAKE_CASE : int = fsspec.open(
__UpperCamelCase, mode='rb', protocol=__UpperCamelCase, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
SCREAMING_SNAKE_CASE : List[str] = os.path.basename(self.file.path.split('::' )[0] )
SCREAMING_SNAKE_CASE : str = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
SCREAMING_SNAKE_CASE : Optional[Any] = None
@classmethod
def UpperCamelCase_ ( cls, A ):
'''simple docstring'''
return super()._strip_protocol(__UpperCamelCase ).lstrip('/' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.dir_cache is None:
SCREAMING_SNAKE_CASE : Any = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
SCREAMING_SNAKE_CASE : List[str] = {f['name']: f}
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.file.open().read()
def UpperCamelCase_ ( self, A, A = "rb", A=None, A=True, A=None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self._strip_protocol(__UpperCamelCase )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'" )
return self.file.open()
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Any = '''bz2'''
A : Tuple = '''bz2'''
A : Optional[Any] = '''.bz2'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = '''gzip'''
A : Tuple = '''gzip'''
A : List[Any] = '''.gz'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Tuple = '''lz4'''
A : Tuple = '''lz4'''
A : Any = '''.lz4'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Optional[int] = '''xz'''
A : List[str] = '''xz'''
A : List[str] = '''.xz'''
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Any = '''zstd'''
A : Optional[Any] = '''zstd'''
A : int = '''.zst'''
def __init__( self, A, A = "rb", A = None, A = None, A = DEFAULT_BLOCK_SIZE, **A, ):
'''simple docstring'''
super().__init__(
fo=__UpperCamelCase, mode=__UpperCamelCase, target_protocol=__UpperCamelCase, target_options=__UpperCamelCase, block_size=__UpperCamelCase, **__UpperCamelCase, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
SCREAMING_SNAKE_CASE : Optional[Any] = self.file.__enter__
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = file_
def __enter__( self ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self, *A, **A ):
'''simple docstring'''
self._file.__exit__(*__UpperCamelCase, **__UpperCamelCase )
def __iter__( self ):
'''simple docstring'''
return iter(self._file )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return next(self._file )
def __getattr__( self, A ):
'''simple docstring'''
return getattr(self._file, __UpperCamelCase )
def fixed_enter(*A, **A ):
return WrappedFile(_enter(*__UpperCamelCase, **__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = fixed_enter
| 707 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowercase__( __UpperCamelCase: bytes ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = f"{sampling_rate}"
SCREAMING_SNAKE_CASE : str = '1'
SCREAMING_SNAKE_CASE : Optional[Any] = 'f32le'
SCREAMING_SNAKE_CASE : Any = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(__UpperCamelCase ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(__UpperCamelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
SCREAMING_SNAKE_CASE : Union[str, Any] = output_stream[0]
SCREAMING_SNAKE_CASE : Dict = np.frombuffer(__UpperCamelCase ,np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: float ,__UpperCamelCase: str = "f32le" ,):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = f"{sampling_rate}"
SCREAMING_SNAKE_CASE : str = '1'
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Optional[int] = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
SCREAMING_SNAKE_CASE : Optional[Any] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : List[str] = 'alsa'
SCREAMING_SNAKE_CASE : str = 'default'
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Dict = 'avfoundation'
SCREAMING_SNAKE_CASE : int = ':0'
elif system == "Windows":
SCREAMING_SNAKE_CASE : str = 'dshow'
SCREAMING_SNAKE_CASE : Any = 'default'
SCREAMING_SNAKE_CASE : Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = _ffmpeg_stream(__UpperCamelCase ,__UpperCamelCase )
for item in iterator:
yield item
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: float ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[Union[Tuple[float, float], float]] = None ,__UpperCamelCase: str = "f32le" ,):
"""simple docstring"""
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE : Any = stream_chunk_s
else:
SCREAMING_SNAKE_CASE : Dict = chunk_length_s
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_microphone(__UpperCamelCase ,__UpperCamelCase ,format_for_conversion=__UpperCamelCase )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[int] = np.intaa
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Optional[int] = np.floataa
SCREAMING_SNAKE_CASE : List[Any] = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE : List[str] = chunk_length_s / 6
SCREAMING_SNAKE_CASE : Optional[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__UpperCamelCase ,(int, float) ):
SCREAMING_SNAKE_CASE : str = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now()
SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=__UpperCamelCase )
for item in chunk_bytes_iter(__UpperCamelCase ,__UpperCamelCase ,stride=(stride_left, stride_right) ,stream=__UpperCamelCase ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE : List[Any] = np.frombuffer(item['raw'] ,dtype=__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Tuple[int, int] ,__UpperCamelCase: bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = B''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for raw in iterator:
acc += raw
if stream and len(__UpperCamelCase ) < chunk_len:
SCREAMING_SNAKE_CASE : Tuple = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__UpperCamelCase ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE : Optional[int] = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE : List[str] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
SCREAMING_SNAKE_CASE : Optional[int] = False
yield item
SCREAMING_SNAKE_CASE : List[Any] = stride_left
SCREAMING_SNAKE_CASE : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__UpperCamelCase ) > stride_left:
SCREAMING_SNAKE_CASE : Tuple = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE : List[Any] = False
yield item
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 2**24 # 16Mo
try:
with subprocess.Popen(__UpperCamelCase ,stdout=subprocess.PIPE ,bufsize=__UpperCamelCase ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE : Any = ffmpeg_process.stdout.read(__UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 508 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'rwkv'
SCREAMING_SNAKE_CASE_ = {'max_position_embeddings': 'context_length'}
def __init__( self , SCREAMING_SNAKE_CASE_=50277 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = context_length
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowerCamelCase_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowerCamelCase_ = layer_norm_epsilon
lowerCamelCase_ = rescale_every
lowerCamelCase_ = use_cache
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = eos_token_id
super().__init__(
tie_word_embeddings=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 42 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowercase (_UpperCAmelCase ):
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(A_ )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : int = self._create_example_records()
__lowerCAmelCase : Dict = Dataset.from_list(A_ )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(A_ ):
self.assertDictEqual(A_ , example_records[i] )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : int = self._create_example_records()
__lowerCAmelCase : Optional[Any] = Dataset.from_list(A_ )
__lowerCAmelCase : int = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCamelCase__ ( self ) ->Union[str, Any]: # checks what happens with missing columns
'''simple docstring'''
__lowerCAmelCase : List[Any] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
__lowerCAmelCase : Union[str, Any] = Dataset.from_list(A_ )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def UpperCamelCase__ ( self ) ->Tuple: # checks if the type can be inferred from the second record
'''simple docstring'''
__lowerCAmelCase : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
__lowerCAmelCase : Union[str, Any] = Dataset.from_list(A_ )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Any = Dataset.from_list([] )
self.assertEqual(len(A_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 492 | 0 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE ="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowercase__( __SCREAMING_SNAKE_CASE : bytes ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = ''.join(bin(__SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for byte in data )
lowercase_ : List[Any] = len(__SCREAMING_SNAKE_CASE ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase_ : Optional[int] = b'=' * ((6 - len(__SCREAMING_SNAKE_CASE ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__SCREAMING_SNAKE_CASE ) % 6)
else:
lowercase_ : List[Any] = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__SCREAMING_SNAKE_CASE ) , 6 ) ).encode()
+ padding
)
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = (
'argument should be a bytes-like object or ASCII string, '
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__SCREAMING_SNAKE_CASE )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
try:
lowercase_ : List[str] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
lowercase_ : List[str] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__SCREAMING_SNAKE_CASE ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase_ : Union[str, Any] = encoded_data[:-padding]
lowercase_ : List[Any] = ''.join(
bin(B64_CHARSET.index(__SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase_ : int = ''.join(
bin(B64_CHARSET.index(__SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )
lowercase_ : str = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__SCREAMING_SNAKE_CASE ) , 8 )
]
return bytes(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
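# Usage sketch (illustration only): round-trip the helpers defined above and compare
# them with the standard-library base64 module. The names base64_encode / base64_decode
# follow the corrected definitions in this snippet.
def _demo_base64_roundtrip() -> None:
    import base64

    data = b'Hello, base64!'
    encoded = base64_encode(data)  # b'SGVsbG8sIGJhc2U2NCE='
    assert encoded == base64.b64encode(data)
    assert base64_decode(encoded) == data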
| 477 | """simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    # Apply the logistic function 1 / (1 + e^-x) element-wise
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
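# Usage sketch (illustration only) for the sigmoid helper above: sigmoid(0) is exactly
# 0.5 and the outputs for -1 and 1 are the usual logistic values.
def _demo_sigmoid() -> None:
    values = np.array([-1.0, 0.0, 1.0])
    result = sigmoid(values)  # roughly [0.26894142, 0.5, 0.73105858]
    assert abs(result[1] - 0.5) < 1e-12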
| 477 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = OpenAIGPTTokenizer
snake_case = OpenAIGPTTokenizerFast
snake_case = True
snake_case = False
def _snake_case ( self )->str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
A_ : Tuple = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
A_ : Any = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
return "lower newer", "lower newer"
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : List[str] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
A_ : Optional[int] = '''lower'''
A_ : Dict = ['''low''', '''er</w>''']
A_ : Any = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Optional[int] = tokens + ['''<unk>''']
A_ : Union[str, Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=15 )->Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# Simple input
A_ : Optional[Any] = '''This is a simple input'''
A_ : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
A_ : Dict = ('''This is a simple input''', '''This is a pair''')
A_ : Optional[int] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' , )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' , )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
pass
| 590 |
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
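# Quick cross-check (illustration only) of the Pascal's-triangle routine above
# against math.comb from the standard library.
def _demo_binomial_coefficient() -> None:
    from math import comb

    for n, r in [(5, 2), (10, 5), (20, 0), (7, 7)]:
        assert binomial_coefficient(n, r) == comb(n, r)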
| 590 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class snake_case_ ( unittest.TestCase ):
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = 'ylacombe/bark-small'
SCREAMING_SNAKE_CASE_ : Dict = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : int = 'en_speaker_1'
SCREAMING_SNAKE_CASE_ : Tuple = 'This is a test string'
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'speaker_embeddings_path.json'
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'speaker_embeddings'
def __A ( self , **__lowerCAmelCase ):
return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCAmelCase )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = BarkProcessor(tokenizer=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 35
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : str = {
'semantic_prompt': np.ones(__lowerCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE_ : Any = processor(text=self.input_string , voice_preset=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(__lowerCAmelCase , **__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = processor(text=self.input_string , voice_preset=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE_ : int = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = BarkProcessor(tokenizer=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 712 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__: Optional[int] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__: Dict = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
lowerCAmelCase__: Any = spec.loader.load_module()
lowerCAmelCase__: Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase__: Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
lowerCAmelCase__: Dict = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def __SCREAMING_SNAKE_CASE ( ) -> int:
SCREAMING_SNAKE_CASE_ : List[str] = []
for config_class in list(CONFIG_MAPPING.values() ):
SCREAMING_SNAKE_CASE_ : Any = False
# source code of `config_class`
SCREAMING_SNAKE_CASE_ : Optional[Any] = inspect.getsource(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = _re_checkpoint.findall(SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
SCREAMING_SNAKE_CASE_ : Optional[Any] = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
SCREAMING_SNAKE_CASE_ : Tuple = True
break
SCREAMING_SNAKE_CASE_ : Any = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE_ : str = '\n'.join(sorted(SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
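# Illustration only: what the checkpoint regex used above is meant to match, shown on a
# self-contained toy docstring. The local names below are chosen for this sketch and are
# independent of the bindings in the script itself.
def _demo_checkpoint_regex() -> None:
    import re

    pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
    docstring = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert pattern.findall(docstring) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]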
| 311 | 0 |
def abbr(a: str, b: str) -> bool:
    '''Return True if `a` can be abbreviated to `b` by upper-casing some of its lowercase letters and deleting the remaining lowercase ones.'''
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
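# Two quick cases (illustration only) for the abbreviation check above, following the
# usual statement of the problem: upper-case some lowercase letters of `a` and delete
# the remaining lowercase ones so that `a` becomes `b`.
def _demo_abbr() -> None:
    assert abbr('daBcd', 'ABC') is True   # capitalise 'a' and 'c', drop both 'd's
    assert abbr('dBcd', 'ABC') is False   # no 'a'/'A' is available for the leading 'A'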
| 600 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hidden_cursor():
    # Hide the cursor while the with-block runs and restore it afterwards,
    # even if the block raises.
    try:
        hide_cursor()
        yield
    finally:
show_cursor()
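# Usage sketch (illustration only) for the helpers above, using the names as written in
# this snippet (hide_cursor / show_cursor and the hidden_cursor context manager).
def _demo_hidden_cursor() -> None:
    import time

    with hidden_cursor():  # the terminal cursor is hidden while this block runs
        time.sleep(0.1)
    # the cursor is restored here, even if the block raises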
| 600 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _lowerCamelCase( UpperCamelCase__ : Any ) -> Union[str, Any]:
A : Optional[int] = botoa.client('''iam''' )
A : Union[str, Any] = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) )
A : str = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _lowerCamelCase( UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
A : int = botoa.client('''iam''' )
return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]
def _lowerCamelCase( ) -> List[Any]:
A : Optional[int] = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , A_ , )
A : Dict = None
if credentials_configuration == 0:
A : List[str] = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
A : Union[str, Any] = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
A : List[Any] = _ask_field('''AWS Access Key ID: ''' )
A : Optional[int] = aws_access_key_id
A : Optional[int] = _ask_field('''AWS Secret Access Key: ''' )
A : int = aws_secret_access_key
A : Union[str, Any] = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
A : Optional[int] = aws_region
A : Tuple = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , A_ , )
if role_management == 0:
A : List[str] = _ask_field('''Enter your IAM role name: ''' )
else:
A : str = '''accelerate_sagemaker_execution_role'''
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
A : str = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , )
A : Optional[Any] = None
if is_custom_docker_image:
A : Dict = _ask_field('''Enter your Docker image: ''' , lambda UpperCamelCase__ : str(A_ ).lower() )
A : Any = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , )
A : Dict = None
if is_sagemaker_inputs_enabled:
A : List[Any] = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda UpperCamelCase__ : str(A_ ).lower() , )
A : Union[str, Any] = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , )
A : int = None
if is_sagemaker_metrics_enabled:
A : List[Any] = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda UpperCamelCase__ : str(A_ ).lower() , )
A : Any = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
A : Union[str, Any] = {}
A : Union[str, Any] = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , )
if use_dynamo:
A : int = '''dynamo_'''
A : List[str] = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
A : int = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , )
if use_custom_options:
A : str = _ask_options(
'''Which mode do you want to use?''' , A_ , lambda UpperCamelCase__ : TORCH_DYNAMO_MODES[int(A_ )] , default='''default''' , )
A : Union[str, Any] = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , )
A : Optional[int] = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=A_ , error_message='''Please enter yes or no.''' , )
A : List[str] = '''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
A : Union[str, Any] = _ask_options(
A_ , A_ , lambda UpperCamelCase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
A : List[Any] = _ask_field(A_ , lambda UpperCamelCase__ : str(A_ ).lower() , default='''ml.p3.2xlarge''' )
A : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
A : List[Any] = _ask_field(
'''How many machines do you want use? [1]: ''' , A_ , default=1 , )
A : List[str] = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
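# Sanity check (illustration only) of the sieve above against a few known prime lists.
def _demo_prime_sieve() -> None:
    assert prime_sieve(10) == [2, 3, 5, 7]
    assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]
    assert len(prime_sieve(100)) == 25  # there are 25 primes up to 100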
| 537 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = """nllb-moe"""
_a = ["""past_key_values"""]
_a = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCAmelCase=128_112 , lowerCAmelCase=1_024 , lowerCAmelCase=12 , lowerCAmelCase=4_096 , lowerCAmelCase=16 , lowerCAmelCase=12 , lowerCAmelCase=4_096 , lowerCAmelCase=16 , lowerCAmelCase=0.05 , lowerCAmelCase=0.05 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=1_024 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase="float32" , lowerCAmelCase=False , lowerCAmelCase=128 , lowerCAmelCase=64 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=0.001 , lowerCAmelCase=0.001 , lowerCAmelCase="all" , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=1.0 , lowerCAmelCase=0.2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=False , **lowerCAmelCase , ) -> Any:
'''simple docstring'''
_lowercase =vocab_size
_lowercase =max_position_embeddings
_lowercase =d_model
_lowercase =encoder_ffn_dim
_lowercase =encoder_layers
_lowercase =encoder_attention_heads
_lowercase =decoder_ffn_dim
_lowercase =decoder_layers
_lowercase =decoder_attention_heads
_lowercase =dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =activation_function
_lowercase =init_std
_lowercase =encoder_layerdrop
_lowercase =decoder_layerdrop
_lowercase =use_cache
_lowercase =encoder_layers
_lowercase =scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase =router_z_loss_coef
_lowercase =router_aux_loss_coef
_lowercase =decoder_sparse_step
_lowercase =encoder_sparse_step
_lowercase =num_experts
_lowercase =expert_capacity
_lowercase =router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
_lowercase =router_dtype
_lowercase =router_ignore_padding_tokens
_lowercase =batch_prioritized_routing
_lowercase =second_expert_policy
_lowercase =normalize_router_prob_before_dropping
_lowercase =moe_eval_capacity_token_fraction
_lowercase =moe_token_dropout
_lowercase =output_router_logits
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 291 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 291 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''LayoutLMv3FeatureExtractor''']
__lowerCAmelCase = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 704 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2_018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params)) | 396 | 0 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of `number`."""
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Count the starting numbers below `number_limit` whose digit-factorial chain contains exactly `chain_length` non-repeating elements."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
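# Illustration only: two well-known digit-factorial facts exercised against the helper
# above. 145 maps to itself (1! + 4! + 5! = 145) and 169 lies on a loop of length three
# (169 -> 363601 -> 1454 -> 169), which is the kind of chain `solution` counts.
def _demo_digit_factorial_sum() -> None:
    assert digit_factorial_sum(145) == 145
    assert digit_factorial_sum(169) == 363601
    assert digit_factorial_sum(363601) == 1454
    assert digit_factorial_sum(1454) == 169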
| 98 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
SCREAMING_SNAKE_CASE__ : int = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
SCREAMING_SNAKE_CASE__ : List[str] = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Any = """whisper"""
_UpperCamelCase : Union[str, Any] = ["""past_key_values"""]
_UpperCamelCase : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , snake_case=51_865 , snake_case=80 , snake_case=6 , snake_case=4 , snake_case=6 , snake_case=4 , snake_case=1_536 , snake_case=1_536 , snake_case=0.0 , snake_case=0.0 , snake_case=50_257 , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=256 , snake_case=0.0 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=False , snake_case=1_500 , snake_case=448 , snake_case=50_256 , snake_case=50_256 , snake_case=50_256 , snake_case=None , snake_case=[220, 50_256] , snake_case=False , snake_case=256 , snake_case=False , snake_case=0.05 , snake_case=10 , snake_case=2 , snake_case=0.0 , snake_case=10 , snake_case=0 , snake_case=7 , **snake_case , ) -> Dict:
"""simple docstring"""
a__ : Optional[Any] = vocab_size
a__ : int = num_mel_bins
a__ : Dict = d_model
a__ : List[Any] = encoder_layers
a__ : List[Any] = encoder_attention_heads
a__ : Optional[int] = decoder_layers
a__ : int = decoder_attention_heads
a__ : Optional[Any] = decoder_ffn_dim
a__ : List[Any] = encoder_ffn_dim
a__ : int = dropout
a__ : Optional[int] = attention_dropout
a__ : Tuple = activation_dropout
a__ : Optional[Any] = activation_function
a__ : List[Any] = init_std
a__ : List[Any] = encoder_layerdrop
a__ : Dict = decoder_layerdrop
a__ : List[Any] = use_cache
a__ : Union[str, Any] = encoder_layers
a__ : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
a__ : Tuple = max_source_positions
a__ : Optional[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
a__ : Optional[Any] = classifier_proj_size
a__ : int = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ : Tuple = apply_spec_augment
a__ : int = mask_time_prob
a__ : Optional[Any] = mask_time_length
a__ : List[str] = mask_time_min_masks
a__ : List[str] = mask_feature_prob
a__ : Dict = mask_feature_length
a__ : Any = mask_feature_min_masks
a__ : List[str] = median_filter_width
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , decoder_start_token_id=snake_case , suppress_tokens=snake_case , begin_suppress_tokens=snake_case , **snake_case , )
class __lowerCAmelCase ( _UpperCamelCase ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
a__ : Dict = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
a__ : Tuple = {0: "batch"}
else:
a__ : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction="inputs" )
return common_inputs
def _snake_case ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , snake_case = 22_050 , snake_case = 5.0 , snake_case = 220 , ) -> Mapping[str, Any]:
"""simple docstring"""
a__ : int = OrderedDict()
a__ : List[str] = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=snake_case , framework=snake_case , sampling_rate=snake_case , time_duration=snake_case , frequency=snake_case , )
a__ : Optional[int] = encoder_inputs["input_features"].shape[2]
a__ : str = encoder_sequence_length // 2 if self.use_past else seq_length
a__ : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , snake_case , snake_case , snake_case , snake_case )
a__ : Any = encoder_inputs.pop("input_features" )
a__ : Dict = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
a__ : Tuple = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def _snake_case ( self ) -> float:
"""simple docstring"""
return 1E-3
| 112 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 361 |
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond (a pyramid of stars)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()
def reverse_floyd(n):
    """Print the lower half of the diamond (an inverted pyramid of stars)."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')
def pretty_print(n):
    """Print the full diamond: the upper half followed by the lower half."""
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    print(r'| /\ | |- | |- |--| |\ /| |-')
    print(r'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
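# Non-interactive sketch (illustration only): pretty_print(3) prints the upper pyramid
# followed by the inverted lower half, roughly
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *
def _demo_pretty_print() -> None:
    pretty_print(3)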
| 361 | 1 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
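# Minimal sketch (illustration only) of one of the helpers re-exported above:
# default_data_collator batches a list of equally sized feature dicts into tensors and
# renames "label" to "labels". It assumes a transformers version where the imports above
# resolve and that PyTorch is installed.
def _demo_default_data_collator() -> None:
    features = [
        {'input_ids': [101, 2023, 102], 'label': 0},
        {'input_ids': [101, 2003, 102], 'label': 1},
    ]
    batch = default_data_collator(features)
    # batch['input_ids'].shape == (2, 3); batch['labels'].tolist() == [0, 1]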
| 225 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
__a = os.path.abspath(_SCREAMING_SNAKE_CASE )
logger.info(f"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
__a = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
__a = []
__a = []
__a = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__a = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(f"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
__a = name[1:]
# figure out how many levels deep the name is
__a = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(_SCREAMING_SNAKE_CASE )
# read data
__a = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
names.append("""/""".join(_SCREAMING_SNAKE_CASE ) )
arrays.append(_SCREAMING_SNAKE_CASE )
logger.info(f"Read a total of {len(_SCREAMING_SNAKE_CASE ):,} layers" )
# Sanity check
if len(set(_SCREAMING_SNAKE_CASE ) ) != 1:
raise ValueError(f"Found layer names with different depths (layer depth {list(set(_SCREAMING_SNAKE_CASE ) )})" )
__a = list(set(_SCREAMING_SNAKE_CASE ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a = full_name.split("""/""" )
__a = model
__a = []
for i, m_name in enumerate(_SCREAMING_SNAKE_CASE ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
__a = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
__a = getattr(_SCREAMING_SNAKE_CASE , """embeddings""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
__a = getattr(_SCREAMING_SNAKE_CASE , """encoder""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """layer""" )
__a = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
__a = getattr(_SCREAMING_SNAKE_CASE , """pooler""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """token_type_embeddings""" )
else:
raise ValueError(f"Unknown embedding layer with name {full_name}" )
trace.append("""weight""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
__a = getattr(_SCREAMING_SNAKE_CASE , """attention""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
__a = getattr(_SCREAMING_SNAKE_CASE , """attention""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """output""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
__a = getattr(_SCREAMING_SNAKE_CASE , """attention""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """output""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
__a = getattr(_SCREAMING_SNAKE_CASE , """output""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
__a = getattr(_SCREAMING_SNAKE_CASE , """output""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
__a = getattr(_SCREAMING_SNAKE_CASE , """intermediate""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
__a = getattr(_SCREAMING_SNAKE_CASE , """weight""" )
else:
logger.warning(f"Ignored {m_name}" )
# for certain layers reshape is necessary
__a = """.""".join(_SCREAMING_SNAKE_CASE )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , _SCREAMING_SNAKE_CASE ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , _SCREAMING_SNAKE_CASE ):
__a = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__a = array.transpose()
if pointer.shape == array.shape:
__a = torch.from_numpy(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}" )
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
logger.info(f"Loading model based on config from {config_path}..." )
__a = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
__a = BertModel(_SCREAMING_SNAKE_CASE )
# Load weights from checkpoint
logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." )
load_tfa_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
lowerCamelCase__ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 225 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowercase_ = "\\n\n"
lowercase_ = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
lowercase_ = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def a__ (self ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def a__ (self , A , A , A = 16 , A = True , A=None ) -> Optional[int]:
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
_a = '''cuda'''
else:
_a = '''cuda''' if torch.cuda.is_available() else '''cpu'''
_a = AutoModelForCausalLM.from_pretrained(A )
_a = model.to(A )
_a = AutoTokenizer.from_pretrained(A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_a = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_a = model.config.max_length - 1
else:
_a = model.config.max_length
_a = tokenizer(
A , add_special_tokens=A , padding=A , truncation=A , max_length=A , return_tensors='''pt''' , return_attention_mask=A , ).to(A )
_a = encodings['''input_ids''']
_a = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_a = []
_a = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(A ) , A ) ):
_a = min(start_index + batch_size , len(A ) )
_a = encoded_texts[start_index:end_index]
_a = attn_masks[start_index:end_index]
if add_start_token:
_a = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(A )
_a = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_a = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(A ), attn_mask] , dim=1 )
_a = encoded_batch
with torch.no_grad():
_a = model(A , attention_mask=A ).logits
_a = out_logits[..., :-1, :].contiguous()
_a = labels[..., 1:].contiguous()
_a = attn_mask[..., 1:].contiguous()
_a = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(A )}
| 352 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value at `node_index` of a full binary game tree of the given `height`, with leaf values in `scores`."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f'Optimal value : {minimax(0, 0, True, scores, height)}')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
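# Worked example (illustrative sketch): for scores = [3, 5, 2, 9] the tree height is
# log2(4) = 2 and the maximiser gets max(min(3, 5), min(2, 9)) = max(3, 2) = 3, i.e.
# minimax(0, 0, True, [3, 5, 2, 9], 2) == 3.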
| 352 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    # branch 1: exclude the element at `index`
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    # branch 2: include the element at `index`
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
__a: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq) | 108 |
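# Expected behaviour (sketch): for [3, 1, 2, 4] the recursion prints all 2**4 = 16
# subsequences, from [] up to [3, 1, 2, 4], because each element is either skipped or
# included at every level of the state-space tree; the same then runs for ['A', 'B', 'C'].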
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
    def get_tokenizer( self , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
    def tearDown( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
    def test_save_load_pretrained_additional_features( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase_ = 35
lowerCamelCase_ = 2
lowerCamelCase_ = 8
lowerCamelCase_ = {
'semantic_prompt': np.ones(SCREAMING_SNAKE_CASE_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase_ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string )
lowerCamelCase_ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 42 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a_ : int = logging.get_logger(__name__)
class __UpperCamelCase ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 2_5_5 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
super().__init__(**_lowerCamelCase )
a__ = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
a__ = get_size_dict(_lowerCamelCase )
a__ = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
a__ = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase , param_name='''crop_size''' )
a__ = do_resize
a__ = do_rescale
a__ = do_normalize
a__ = do_center_crop
a__ = crop_size
a__ = size
a__ = resample
a__ = rescale_factor
a__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
a__ = get_size_dict(_lowerCamelCase )
if "shortest_edge" in size:
a__ = get_resize_output_image_size(_lowerCamelCase , size=size['''shortest_edge'''] , default_to_square=_lowerCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
a__ = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
a__ = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCamelCase , **_lowerCamelCase )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> np.ndarray:
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = do_normalize if do_normalize is not None else self.do_normalize
a__ = do_center_crop if do_center_crop is not None else self.do_center_crop
a__ = crop_size if crop_size is not None else self.crop_size
a__ = get_size_dict(_lowerCamelCase , param_name='''crop_size''' , default_to_square=_lowerCamelCase )
a__ = resample if resample is not None else self.resample
a__ = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ = image_mean if image_mean is not None else self.image_mean
a__ = image_std if image_std is not None else self.image_std
a__ = size if size is not None else self.size
a__ = get_size_dict(_lowerCamelCase )
if not is_batched(_lowerCamelCase ):
a__ = [images]
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
a__ = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
a__ = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
a__ = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
a__ = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
a__ = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
a__ = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
| 712 |
from manim import *
class __UpperCamelCase ( Scene ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Dict:
a__ = Rectangle(height=0.5 , width=0.5 )
a__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = VGroup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = Text('''CPU''' , font_size=2_4 )
a__ = Group(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE )
a__ = [mem.copy() for i in range(1 )]
a__ = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = Text('''GPU''' , font_size=2_4 )
a__ = Group(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE )
gpu.align_to(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
gpu.set_x(gpu.get_x() - 1 )
self.add(SCREAMING_SNAKE_CASE )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
a__ = Text('''Model''' , font_size=2_4 )
a__ = Group(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.play(
Create(SCREAMING_SNAKE_CASE , run_time=1 ) , Create(SCREAMING_SNAKE_CASE , run_time=1 ) , Create(SCREAMING_SNAKE_CASE , run_time=1 ) , )
a__ = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=2_4 , )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE , run_time=2.5 ) , Write(SCREAMING_SNAKE_CASE ) , Write(SCREAMING_SNAKE_CASE ) )
self.add(SCREAMING_SNAKE_CASE )
a__ = []
a__ = []
a__ = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE ):
a__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE , opacity=0.7 )
cpu_target.move_to(SCREAMING_SNAKE_CASE )
cpu_target.generate_target()
a__ = 0.46 / 4
a__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE , buff=0.0 )
cpu_targs.append(SCREAMING_SNAKE_CASE )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(SCREAMING_SNAKE_CASE ) )
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE )
self.play(*SCREAMING_SNAKE_CASE )
self.wait()
| 148 | 0 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
__snake_case : List[str] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
__snake_case : str = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
__snake_case : int = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Optional[int], __snake_case : Dict, __snake_case : bool, __snake_case : Optional[Dict[int, int]] = None, __snake_case : bool = False, ) -> Any:
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
A__ : Any =new_id
# turn into Numpy arrays
A__ : List[Any] =np.array(__snake_case )
A__ : List[Any] =np.array(__snake_case )
if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
A__ : Optional[Any] =label != ignore_index
A__ : str =np.not_equal(__snake_case, __snake_case )
A__ : List[str] =pred_label[mask]
A__ : str =np.array(__snake_case )[mask]
A__ : List[str] =pred_label[pred_label == label]
A__ : List[Any] =np.histogram(__snake_case, bins=__snake_case, range=(0, num_labels - 1) )[0]
A__ : Any =np.histogram(__snake_case, bins=__snake_case, range=(0, num_labels - 1) )[0]
A__ : Optional[int] =np.histogram(__snake_case, bins=__snake_case, range=(0, num_labels - 1) )[0]
A__ : Dict =area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : Optional[int], __snake_case : bool, __snake_case : Optional[Dict[int, int]] = None, __snake_case : bool = False, ) -> Optional[int]:
"""simple docstring"""
A__ : int =np.zeros((num_labels,), dtype=np.floataa )
A__ : int =np.zeros((num_labels,), dtype=np.floataa )
A__ : List[Any] =np.zeros((num_labels,), dtype=np.floataa )
A__ : int =np.zeros((num_labels,), dtype=np.floataa )
for result, gt_seg_map in zip(__snake_case, __snake_case ):
A__ , A__ , A__ , A__ : List[Any] =intersect_and_union(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int], __snake_case : Union[str, Any], __snake_case : bool, __snake_case : Optional[int] = None, __snake_case : Optional[Dict[int, int]] = None, __snake_case : bool = False, ) -> List[Any]:
"""simple docstring"""
A__ , A__ , A__ , A__ : Optional[Any] =total_intersect_and_union(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
# compute metrics
A__ : Tuple ={}
A__ : Optional[int] =total_area_intersect.sum() / total_area_label.sum()
A__ : Optional[int] =total_area_intersect / total_area_union
A__ : str =total_area_intersect / total_area_label
A__ : Any =np.nanmean(__snake_case )
A__ : Tuple =np.nanmean(__snake_case )
A__ : List[str] =all_acc
A__ : Tuple =iou
A__ : Tuple =acc
if nan_to_num is not None:
A__ : str ={metric: np.nan_to_num(__snake_case, nan=__snake_case ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
def lowercase__ ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : bool , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Dict[int, int]] = None , lowerCAmelCase_ : bool = False , ) -> Optional[int]:
'''simple docstring'''
A__ : Optional[int] =mean_iou(
results=lowerCAmelCase_ , gt_seg_maps=lowerCAmelCase_ , num_labels=lowerCAmelCase_ , ignore_index=lowerCAmelCase_ , nan_to_num=lowerCAmelCase_ , label_map=lowerCAmelCase_ , reduce_labels=lowerCAmelCase_ , )
return iou_result
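# Quick sanity check (sketch): with pred = [[1, 1], [1, 0]] and label = [[1, 1], [0, 0]],
# class 1 has an intersection of 2 pixels and a union of 3, so its IoU is 2 / 3 ~= 0.67.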
| 215 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 1
@register_to_config
    def __init__( self , num_train_timesteps: int = 10_00 , trained_betas: Optional[Union[np.ndarray, List[float]]] = None ) -> Union[str, Any]:
'''simple docstring'''
# set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
# standard deviation of the initial noise distribution
A__ : Union[str, Any] =1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
A__ : str =4
# running values
A__ : Optional[int] =[]
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ) -> Tuple:
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , return_dict: bool = True , ) -> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self , sample: torch.FloatTensor , *args , **kwargs ) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ) -> Dict:
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self : str ) -> Optional[int]:
'''simple docstring'''
return self.config.num_train_timesteps
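# Usage sketch (illustrative only; assumes a denoising model `model` and an initial noisy
# `sample`, neither of which is defined in this file):
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample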
| 215 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCAmelCase_ ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase ) != tokenize_chinese_chars
):
snake_case = getattr(lowerCAmelCase , normalizer_state.pop('type' ) )
snake_case = do_lower_case
snake_case = strip_accents
snake_case = tokenize_chinese_chars
snake_case = normalizer_class(**lowerCAmelCase )
snake_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 104 | """simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = ['a', 'b', 'c']
# Defaults to last layer if both are None
snake_case ,snake_case = get_aligned_output_features_output_indices(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['c'] )
self.assertEqual(lowerCAmelCase , [2] )
# Out indices set to match out features
snake_case ,snake_case = get_aligned_output_features_output_indices(['a', 'c'] , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['a', 'c'] )
self.assertEqual(lowerCAmelCase , [0, 2] )
# Out features set to match out indices
snake_case ,snake_case = get_aligned_output_features_output_indices(lowerCAmelCase , [0, 2] , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['a', 'c'] )
self.assertEqual(lowerCAmelCase , [0, 2] )
# Out features selected from negative indices
snake_case ,snake_case = get_aligned_output_features_output_indices(lowerCAmelCase , [-3, -1] , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['a', 'c'] )
self.assertEqual(lowerCAmelCase , [-3, -1] )
def snake_case ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , lowerCAmelCase )
# Out features must be a list
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(lowerCAmelCase , 0 , ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(lowerCAmelCase , (0, 1) , ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
def snake_case ( self ):
"""simple docstring"""
snake_case = BackboneMixin()
snake_case = ['a', 'b', 'c']
snake_case = ['a', 'c']
snake_case = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
snake_case = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
snake_case = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 104 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __snake_case ( lowerCAmelCase : list[list[float]] ):
    d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowerCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
        determinant = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowerCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
        cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
# Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
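# Minimal usage sketch (illustrative; the expected output is worked out by hand): for the
# 2x2 matrix [[2.0, 5.0], [1.0, 3.0]] the determinant is 2 * 3 - 1 * 5 = 1, so the inverse
# is [[3.0, -5.0], [-1.0, 2.0]]:
#
#     print(__snake_case([[2.0, 5.0], [1.0, 3.0]]))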
| 396 | '''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_UpperCamelCase : Any = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_UpperCamelCase : List[Any] = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_UpperCamelCase : List[str] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _lowercase( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ) ,id='references' ),
} ) ,)
    def _compute( self ,predictions: List[List[str]] ,references: List[List[List[str]]] ,min_len: int = 1 ,max_len: int = 4 ,):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references ,hypotheses=predictions ,min_len=min_len ,max_len=max_len )
        }
| 396 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowerCAmelCase ( TestCase ):
    def _no_encoding_on_file_open( self , _UpperCamelCase ) -> Optional[Any]:
with open(_UpperCamelCase , encoding="utf-8" ) as input_file:
lowerCAmelCase_ = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
lowerCAmelCase_ = input_file.read()
lowerCAmelCase_ = regexp.search(_UpperCamelCase )
return match
    def _no_print_statements( self , _UpperCamelCase ) -> Dict:
with open(_UpperCamelCase , encoding="utf-8" ) as input_file:
lowerCAmelCase_ = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
lowerCAmelCase_ = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
lowerCAmelCase_ = regexp.finditer(_UpperCamelCase )
lowerCAmelCase_ = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
    def test_no_encoding_on_file_open( self ) -> Union[str, Any]:
lowerCAmelCase_ = Path("./datasets" )
lowerCAmelCase_ = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_UpperCamelCase ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
    def test_no_print_statements( self ) -> Optional[int]:
lowerCAmelCase_ = Path("./datasets" )
lowerCAmelCase_ = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(_UpperCamelCase ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 279 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _lowerCAmelCase ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , _UpperCamelCase = 128 , _UpperCamelCase = 256 , _UpperCamelCase = 2000.0 , _UpperCamelCase = 768 , _UpperCamelCase = 12 , _UpperCamelCase = 12 , _UpperCamelCase = 64 , _UpperCamelCase = 2_048 , _UpperCamelCase = 0.1 , ) -> str:
super().__init__()
lowerCAmelCase_ = nn.Sequential(
nn.Linear(_UpperCamelCase , d_model * 4 , bias=_UpperCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_UpperCamelCase ) , nn.SiLU() , )
lowerCAmelCase_ = nn.Embedding(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = False
lowerCAmelCase_ = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
lowerCAmelCase_ = nn.Dropout(p=_UpperCamelCase )
lowerCAmelCase_ = nn.ModuleList()
for lyr_num in range(_UpperCamelCase ):
# FiLM conditional T5 decoder
lowerCAmelCase_ = DecoderLayer(d_model=_UpperCamelCase , d_kv=_UpperCamelCase , num_heads=_UpperCamelCase , d_ff=_UpperCamelCase , dropout_rate=_UpperCamelCase )
self.decoders.append(_UpperCamelCase )
lowerCAmelCase_ = TaLayerNorm(_UpperCamelCase )
lowerCAmelCase_ = nn.Dropout(p=_UpperCamelCase )
lowerCAmelCase_ = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> int:
lowerCAmelCase_ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowerCAmelCase_ = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
lowerCAmelCase_ = self.conditioning_emb(_UpperCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowerCAmelCase_ = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowerCAmelCase_ = torch.broadcast_to(
torch.arange(_UpperCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
lowerCAmelCase_ = self.position_encoding(_UpperCamelCase )
lowerCAmelCase_ = self.continuous_inputs_projection(_UpperCamelCase )
inputs += position_encodings
lowerCAmelCase_ = self.dropout(_UpperCamelCase )
# decoder: No padding present.
lowerCAmelCase_ = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowerCAmelCase_ = [(x, self.encoder_decoder_mask(_UpperCamelCase , _UpperCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowerCAmelCase_ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
lowerCAmelCase_ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
lowerCAmelCase_ = lyr(
_UpperCamelCase , conditioning_emb=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , )[0]
lowerCAmelCase_ = self.decoder_norm(_UpperCamelCase )
lowerCAmelCase_ = self.post_dropout(_UpperCamelCase )
lowerCAmelCase_ = self.spec_out(_UpperCamelCase )
return spec_out
class DecoderLayer( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-6 ) -> Dict:
super().__init__()
lowerCAmelCase_ = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_UpperCamelCase , d_kv=_UpperCamelCase , num_heads=_UpperCamelCase , dropout_rate=_UpperCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_UpperCamelCase , d_kv=_UpperCamelCase , num_heads=_UpperCamelCase , dropout_rate=_UpperCamelCase , layer_norm_epsilon=_UpperCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_UpperCamelCase , d_ff=_UpperCamelCase , dropout_rate=_UpperCamelCase , layer_norm_epsilon=_UpperCamelCase ) )
def __a ( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , ) -> Any:
lowerCAmelCase_ = self.layer[0](
_UpperCamelCase , conditioning_emb=_UpperCamelCase , attention_mask=_UpperCamelCase , )
if encoder_hidden_states is not None:
lowerCAmelCase_ = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
lowerCAmelCase_ = self.layer[1](
_UpperCamelCase , key_value_states=_UpperCamelCase , attention_mask=_UpperCamelCase , )
# Apply Film Conditional Feed Forward layer
lowerCAmelCase_ = self.layer[-1](_UpperCamelCase , _UpperCamelCase )
return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
super().__init__()
lowerCAmelCase_ = TaLayerNorm(_UpperCamelCase )
lowerCAmelCase_ = TaFiLMLayer(in_features=d_model * 4 , out_features=_UpperCamelCase )
lowerCAmelCase_ = Attention(query_dim=_UpperCamelCase , heads=_UpperCamelCase , dim_head=_UpperCamelCase , out_bias=_UpperCamelCase , scale_qk=_UpperCamelCase )
lowerCAmelCase_ = nn.Dropout(_UpperCamelCase )
def __a ( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , ) -> Dict:
# pre_self_attention_layer_norm
lowerCAmelCase_ = self.layer_norm(_UpperCamelCase )
if conditioning_emb is not None:
lowerCAmelCase_ = self.FiLMLayer(_UpperCamelCase , _UpperCamelCase )
# Self-attention block
lowerCAmelCase_ = self.attention(_UpperCamelCase )
lowerCAmelCase_ = hidden_states + self.dropout(_UpperCamelCase )
return hidden_states
class TaLayerCrossAttention( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
super().__init__()
lowerCAmelCase_ = Attention(query_dim=_UpperCamelCase , heads=_UpperCamelCase , dim_head=_UpperCamelCase , out_bias=_UpperCamelCase , scale_qk=_UpperCamelCase )
lowerCAmelCase_ = TaLayerNorm(_UpperCamelCase , eps=_UpperCamelCase )
lowerCAmelCase_ = nn.Dropout(_UpperCamelCase )
def __a ( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , ) -> int:
lowerCAmelCase_ = self.layer_norm(_UpperCamelCase )
lowerCAmelCase_ = self.attention(
_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
lowerCAmelCase_ = hidden_states + self.dropout(_UpperCamelCase )
return layer_output
class TaLayerFFCond( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
super().__init__()
lowerCAmelCase_ = TaDenseGatedActDense(d_model=_UpperCamelCase , d_ff=_UpperCamelCase , dropout_rate=_UpperCamelCase )
lowerCAmelCase_ = TaFiLMLayer(in_features=d_model * 4 , out_features=_UpperCamelCase )
lowerCAmelCase_ = TaLayerNorm(_UpperCamelCase , eps=_UpperCamelCase )
lowerCAmelCase_ = nn.Dropout(_UpperCamelCase )
def __a ( self , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
lowerCAmelCase_ = self.layer_norm(_UpperCamelCase )
if conditioning_emb is not None:
lowerCAmelCase_ = self.film(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = self.DenseReluDense(_UpperCamelCase )
lowerCAmelCase_ = hidden_states + self.dropout(_UpperCamelCase )
return hidden_states
class TaDenseGatedActDense( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
lowerCAmelCase_ = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
lowerCAmelCase_ = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
lowerCAmelCase_ = nn.Dropout(_UpperCamelCase )
lowerCAmelCase_ = NewGELUActivation()
def __a ( self , _UpperCamelCase ) -> int:
lowerCAmelCase_ = self.act(self.wi_a(_UpperCamelCase ) )
lowerCAmelCase_ = self.wi_a(_UpperCamelCase )
lowerCAmelCase_ = hidden_gelu * hidden_linear
lowerCAmelCase_ = self.dropout(_UpperCamelCase )
lowerCAmelCase_ = self.wo(_UpperCamelCase )
return hidden_states
class TaLayerNorm( nn.Module ):
    def __init__( self , hidden_size , eps=1e-6 ) -> int:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ) -> Union[str, Any]:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
    def forward( self , input: torch.Tensor ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(input , 3.0 )) ))
class _lowerCAmelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> str:
super().__init__()
lowerCAmelCase_ = nn.Linear(_UpperCamelCase , out_features * 2 , bias=_UpperCamelCase )
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> int:
lowerCAmelCase_ = self.scale_bias(_UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ = torch.chunk(_UpperCamelCase , 2 , -1 )
lowerCAmelCase_ = x * (1 + scale) + shift
return x
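# --- Illustrative aside (not part of the original module) ---
# Minimal sketch of the FiLM idea used above: a conditioning embedding is projected to a
# per-feature (scale, shift) pair and the features are modulated as x * (1 + scale) + shift.
import torch
from torch import nn

class FiLMSketch(nn.Module):
    def __init__(self, cond_dim: int, num_features: int) -> None:
        super().__init__()
        self.scale_bias = nn.Linear(cond_dim, num_features * 2, bias=False)

    def forward(self, x: torch.Tensor, conditioning: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(conditioning), 2, dim=-1)
        return x * (1 + scale) + shift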
| 279 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any]=10 ) -> Union[str, Any]:
__lowerCamelCase = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : str=10 ) -> Tuple:
__lowerCamelCase = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(__lowerCAmelCase , '''schedule.bin''' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
__lowerCamelCase = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
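# --- Illustrative aside (not part of the original test file) ---
# Minimal sketch of what the two helpers above record: build an optimizer plus a
# warmup + linear-decay schedule and read the learning rate step by step. The tiny
# model and hyper-parameters are illustrative, and it assumes torch is available
# (the imports above are guarded by is_torch_available()).
def _example_lr_trajectory(num_steps=10):
    model = nn.Linear(4, 4)
    optimizer = AdamW(model.parameters(), lr=10.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=num_steps)
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])  # ramps 0 -> 10 over warmup, then decays linearly
        scheduler.step()
    return lrs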
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> int:
__lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCamelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_00 ):
__lowerCamelCase = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def __A ( self : Any ) -> Union[str, Any]:
__lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCamelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=SCREAMING_SNAKE_CASE__ , weight_decay=0.0 , relative_step=SCREAMING_SNAKE_CASE__ , scale_parameter=SCREAMING_SNAKE_CASE__ , warmup_init=SCREAMING_SNAKE_CASE__ , )
for _ in range(10_00 ):
__lowerCamelCase = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
a__ : Union[str, Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
a__ : List[Any] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
a__ : Any = 10
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> Dict:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ , msg=SCREAMING_SNAKE_CASE__ )
def __A ( self : Tuple ) -> List[Any]:
__lowerCamelCase = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
__lowerCamelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
__lowerCamelCase , __lowerCamelCase = data
__lowerCamelCase = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
__lowerCamelCase = unwrap_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps )
self.assertListAlmostEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
__lowerCamelCase = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(SCREAMING_SNAKE_CASE__ ) # wrap to test picklability of the schedule
__lowerCamelCase = unwrap_and_save_reload_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , msg=f'''failed for {scheduler_func} in save and reload''' )
class lowerCAmelCase__ :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
__lowerCamelCase = fn
def __call__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
return self.fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@classmethod
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
__lowerCamelCase = list(map(self , scheduler.lr_lambdas ) )
| 298 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
def __init__( self : Optional[int] ) -> Optional[int]:
__lowerCamelCase = ''''''
__lowerCamelCase = ''''''
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = 2_56
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = 0
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> Dict:
__lowerCamelCase = cva.imread(SCREAMING_SNAKE_CASE__ , 0 )
__lowerCamelCase = copy.deepcopy(self.img )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='''x''' )
__lowerCamelCase = np.sum(SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__lowerCamelCase = x[i] / self.k
self.sk += prk
__lowerCamelCase = (self.L - 1) * self.sk
if self.rem != 0:
__lowerCamelCase = int(last % last )
__lowerCamelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = int(np.ma.count(self.img ) / self.img[1].size )
__lowerCamelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
__lowerCamelCase = self.img[j][i]
if num != self.last_list[num]:
__lowerCamelCase = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def __A ( self : Optional[Any] ) -> Union[str, Any]:
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def __A ( self : str ) -> Union[str, Any]:
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
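# --- Illustrative aside (not part of the original file) ---
# The class above builds a cumulative-distribution lookup table pixel by pixel; this is a
# compact NumPy sketch of the same histogram-equalisation idea for an 8-bit grey image
# (the function name and the 256-level assumption are illustrative).
def equalize_histogram_sketch(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()                      # cumulative distribution in [0, 1]
    lut = np.round((levels - 1) * cdf).astype(img.dtype)  # grey-level remapping table
    return lut[img]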
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
SCREAMING_SNAKE_CASE__ : List[str] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 298 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __UpperCAmelCase ):
def __init__( self : str , snake_case__ : Tuple , snake_case__ : Optional[Any]=13 , snake_case__ : List[Any]=7 , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : List[str]=99 , snake_case__ : Tuple=32 , snake_case__ : Tuple=5 , snake_case__ : Union[str, Any]=4 , snake_case__ : List[str]=37 , snake_case__ : List[str]="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : Tuple=512 , snake_case__ : Any=16 , snake_case__ : Optional[Any]=2 , snake_case__ : Tuple=0.0_2 , snake_case__ : List[Any]=False , snake_case__ : Dict=True , snake_case__ : Dict="None" , snake_case__ : Optional[Any]=3 , snake_case__ : List[str]=4 , snake_case__ : Dict=None , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = relative_attention
__lowerCAmelCase = position_biased_input
__lowerCAmelCase = pos_att_type
__lowerCAmelCase = scope
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = 300
return config
def UpperCAmelCase__ ( self : Dict , snake_case__ : str ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = DebertaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
__lowerCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )[0]
__lowerCAmelCase = model(snake_case__ , token_type_ids=snake_case__ )[0]
__lowerCAmelCase = model(snake_case__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int ):
"""simple docstring"""
__lowerCAmelCase = DebertaForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
__lowerCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DebertaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
__lowerCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case__ )
def UpperCAmelCase__ ( self : str , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DebertaForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
__lowerCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Any , snake_case__ : int , snake_case__ : str , snake_case__ : List[str] , snake_case__ : List[Any] ):
"""simple docstring"""
__lowerCAmelCase = DebertaForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
__lowerCAmelCase = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase_ : Any = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : List[str] = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : Dict = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCAmelCase = DebertaModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case__ )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case__ )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case__ )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case__ )
@slow
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DebertaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__lowerCAmelCase = DebertaModel.from_pretrained("microsoft/deberta-base" )
__lowerCAmelCase = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCAmelCase = model(snake_case__ , attention_mask=snake_case__ )[0]
# compare the actual values for a slice.
__lowerCAmelCase = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
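# --- Illustrative aside (not part of the original test file) ---
# Minimal sketch of running the same checkpoint outside the test harness; left commented
# out because it downloads weights (the tokenizer class and input text are illustrative).
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
# model = DebertaModel.from_pretrained("microsoft/deberta-base")
# outputs = model(**tokenizer("hello world", return_tensors="pt"))
# print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)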
| 376 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
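# --- Illustrative aside (not part of the original file) ---
# The replacement import that the deprecation message above points to:
# from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput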
| 376 | 1 |
def lowerCAmelCase__( lowercase : str , lowercase : str = " " ) -> List[Any]:
__snake_case : Any = []
__snake_case : List[str] = 0
for index, char in enumerate(lowerCAmelCase__ ):
if char == separator:
split_words.append(string[last_index:index] )
__snake_case : Optional[Any] = index + 1
elif index + 1 == len(lowerCAmelCase__ ):
split_words.append(string[last_index : index + 1] )
return split_words
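# --- Illustrative aside (not part of the original file) ---
# Intended behaviour of the manual splitter above, following its loop logic:
#   "apple#banana#cherry" with separator "#"  ->  ["apple", "banana", "cherry"]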
if __name__ == "__main__":
from doctest import testmod
testmod()
| 243 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : jnp.ndarray
@flax_register_to_config
class UpperCamelCase__ ( nn.Module ,__lowercase ,__lowercase ):
_SCREAMING_SNAKE_CASE : int = 32
_SCREAMING_SNAKE_CASE : int = 4
_SCREAMING_SNAKE_CASE : int = 4
_SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
_SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1_280, 1_280)
_SCREAMING_SNAKE_CASE : int = 2
_SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
_SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
_SCREAMING_SNAKE_CASE : int = 1_280
_SCREAMING_SNAKE_CASE : float = 0.0
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : bool = False
def lowerCAmelCase (self : Dict , snake_case_ : jax.random.KeyArray ):
# init input tensors
__a : Tuple = (1, self.in_channels, self.sample_size, self.sample_size)
__a : List[str] = jnp.zeros(snake_case_ , dtype=jnp.floataa )
__a : Optional[Any] = jnp.ones((1,) , dtype=jnp.intaa )
__a : Optional[int] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__a , __a : Dict = jax.random.split(snake_case_ )
__a : Optional[Any] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(snake_case_ , snake_case_ , snake_case_ , snake_case_ )["params"]
def lowerCAmelCase (self : Tuple ):
__a : Tuple = self.block_out_channels
__a : List[str] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__a : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
__a : int = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__a : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__a : Any = FlaxTimestepEmbedding(snake_case_ , dtype=self.dtype )
__a : str = self.only_cross_attention
if isinstance(snake_case_ , snake_case_ ):
__a : Tuple = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case_ , snake_case_ ):
__a : Union[str, Any] = (num_attention_heads,) * len(self.down_block_types )
# down
__a : Dict = []
__a : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__a : Any = output_channel
__a : Union[str, Any] = block_out_channels[i]
__a : List[str] = i == len(snake_case_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__a : Any = FlaxCrossAttnDownBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__a : int = FlaxDownBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case_ )
__a : Dict = down_blocks
# mid
__a : Optional[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__a : List[str] = []
__a : str = list(reversed(snake_case_ ) )
__a : Optional[int] = list(reversed(snake_case_ ) )
__a : Optional[int] = list(reversed(snake_case_ ) )
__a : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__a : Optional[int] = output_channel
__a : List[Any] = reversed_block_out_channels[i]
__a : str = reversed_block_out_channels[min(i + 1 , len(snake_case_ ) - 1 )]
__a : List[Any] = i == len(snake_case_ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__a : Optional[Any] = FlaxCrossAttnUpBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , prev_output_channel=snake_case_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__a : Dict = FlaxUpBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , prev_output_channel=snake_case_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(snake_case_ )
__a : str = output_channel
__a : Any = up_blocks
# out
__a : List[str] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
__a : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : str , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Any=None , snake_case_ : Any=None , snake_case_ : bool = True , snake_case_ : bool = False , ):
# 1. time
if not isinstance(snake_case_ , jnp.ndarray ):
__a : Dict = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
__a : Optional[int] = timesteps.astype(dtype=jnp.floataa )
__a : List[Any] = jnp.expand_dims(snake_case_ , 0 )
__a : Tuple = self.time_proj(snake_case_ )
__a : Tuple = self.time_embedding(snake_case_ )
# 2. pre-process
__a : Union[str, Any] = jnp.transpose(snake_case_ , (0, 2, 3, 1) )
__a : List[Any] = self.conv_in(snake_case_ )
# 3. down
__a : List[str] = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case_ , snake_case_ ):
__a , __a : Dict = down_block(snake_case_ , snake_case_ , snake_case_ , deterministic=not train )
else:
__a , __a : Dict = down_block(snake_case_ , snake_case_ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__a : Tuple = ()
for down_block_res_sample, down_block_additional_residual in zip(
snake_case_ , snake_case_ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__a : List[str] = new_down_block_res_samples
# 4. mid
__a : Union[str, Any] = self.mid_block(snake_case_ , snake_case_ , snake_case_ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__a : int = down_block_res_samples[-(self.layers_per_block + 1) :]
__a : Dict = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(snake_case_ , snake_case_ ):
__a : int = up_block(
snake_case_ , temb=snake_case_ , encoder_hidden_states=snake_case_ , res_hidden_states_tuple=snake_case_ , deterministic=not train , )
else:
__a : Optional[Any] = up_block(snake_case_ , temb=snake_case_ , res_hidden_states_tuple=snake_case_ , deterministic=not train )
# 6. post-process
__a : str = self.conv_norm_out(snake_case_ )
__a : Union[str, Any] = nn.silu(snake_case_ )
__a : Any = self.conv_out(snake_case_ )
__a : Dict = jnp.transpose(snake_case_ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=snake_case_ )
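# --- Illustrative aside (not part of the original module) ---
# Minimal sketch of the sinusoidal timestep featurisation that FlaxTimesteps supplies
# upstream of the MLP embedding used in __call__ above; it expects a 1-D array of
# timesteps. The 10000 base and half-sin / half-cos layout are the conventional choices,
# and the real layer's flip/shift options are omitted here.
def sinusoidal_timestep_embedding_sketch(timesteps: jnp.ndarray, dim: int) -> jnp.ndarray:
    half = dim // 2
    freqs = jnp.exp(-jnp.log(10000.0) * jnp.arange(half) / half)
    args = timesteps[:, None].astype(jnp.float32) * freqs[None, :]
    return jnp.concatenate([jnp.sin(args), jnp.cos(args)], axis=-1)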
| 521 | 0 |
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
UpperCamelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase = logging.getLogger()
def _lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ = argparse.ArgumentParser()
parser.add_argument("-f" )
A__ = parser.parse_args()
return args.f
def _lowerCamelCase ( UpperCAmelCase_ : Dict, UpperCAmelCase_ : Tuple="eval" ) -> List[Any]:
"""simple docstring"""
A__ = os.path.join(UpperCAmelCase_, F"""{split}_results.json""" )
if os.path.exists(UpperCAmelCase_ ):
with open(UpperCAmelCase_, "r" ) as f:
return json.load(UpperCAmelCase_ )
raise ValueError(F"""can't find {path}""" )
UpperCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCamelCase__ ( __a ):
"""simple docstring"""
def snake_case__ ( self ) -> List[str]:
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A__ , "argv" , A__ ):
run_flax_glue.main()
A__ = get_results(A__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
@slow
def snake_case__ ( self ) -> List[Any]:
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A__ , "argv" , A__ ):
run_clm_flax.main()
A__ = get_results(A__ )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def snake_case__ ( self ) -> int:
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(A__ , "argv" , A__ ):
run_summarization_flax.main()
A__ = get_results(A__ , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def snake_case__ ( self ) -> Union[str, Any]:
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(A__ , "argv" , A__ ):
run_mlm_flax.main()
A__ = get_results(A__ )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def snake_case__ ( self ) -> Dict:
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A__ , "argv" , A__ ):
run_ta_mlm_flax.main()
A__ = get_results(A__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.4_2 )
@slow
def snake_case__ ( self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
A__ = 7 if get_gpu_count() > 1 else 2
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(A__ , "argv" , A__ ):
run_flax_ner.main()
A__ = get_results(A__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def snake_case__ ( self ) -> Any:
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(A__ , "argv" , A__ ):
run_qa.main()
A__ = get_results(A__ )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
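# --- Illustrative aside (not part of the original test file) ---
# Each example script is expected to dump its metrics as "<split>_results.json" in the
# output directory, e.g. an eval_results.json containing something like
# {"eval_accuracy": 0.78, "eval_perplexity": 35.2}; get_results() above simply loads it.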
| 700 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
UpperCamelCase = None
UpperCamelCase = {
"""7B""": 1_1008,
"""13B""": 1_3824,
"""30B""": 1_7920,
"""65B""": 2_2016,
"""70B""": 2_8672,
}
UpperCamelCase = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def _lowerCamelCase ( UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : Optional[Any]=1, UpperCAmelCase_ : Union[str, Any]=256 ) -> Any:
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
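# --- Illustrative aside (not part of the original script) ---
# Worked example of the rounding above for the 7B model (hidden size 4096, defaults
# ffn_dim_multiplier=1 and multiple_of=256): int(8 * 4096 / 3) = 10922, and rounding up
# to the next multiple of 256 gives 11008, matching the "7B" entry in the table above.
def _example_intermediate_size(n: int = 4096, ffn_dim_multiplier: float = 1.0, multiple_of: int = 256) -> int:
    hidden = int(ffn_dim_multiplier * int(8 * n / 3))
    return multiple_of * ((hidden + multiple_of - 1) // multiple_of)  # -> 11008 for the defaults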
def _lowerCamelCase ( UpperCAmelCase_ : Optional[int] ) -> List[str]:
"""simple docstring"""
with open(UpperCAmelCase_, "r" ) as f:
return json.load(UpperCAmelCase_ )
def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
with open(UpperCAmelCase_, "w" ) as f:
json.dump(UpperCAmelCase_, UpperCAmelCase_ )
def _lowerCamelCase ( UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : Optional[int]=True ) -> List[Any]:
"""simple docstring"""
os.makedirs(UpperCAmelCase_, exist_ok=UpperCAmelCase_ )
A__ = os.path.join(UpperCAmelCase_, "tmp" )
os.makedirs(UpperCAmelCase_, exist_ok=UpperCAmelCase_ )
A__ = read_json(os.path.join(UpperCAmelCase_, "params.json" ) )
A__ = NUM_SHARDS[model_size]
A__ = params["n_layers"]
A__ = params["n_heads"]
A__ = n_heads // num_shards
A__ = params["dim"]
A__ = dim // n_heads
A__ = 1_0000.0
A__ = 1.0 / (base ** (torch.arange(0, UpperCAmelCase_, 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
A__ = params["n_kv_heads"] # for GQA / MQA
A__ = n_heads_per_shard // num_key_value_heads
A__ = dim // num_key_value_heads
else: # compatibility with other checkpoints
A__ = n_heads
A__ = n_heads_per_shard
A__ = dim
# permute for sliced rotary
def permute(UpperCAmelCase_ : Optional[Any], UpperCAmelCase_ : List[str]=n_heads, UpperCAmelCase_ : List[str]=dim, UpperCAmelCase_ : str=dim ):
return w.view(UpperCAmelCase_, dima // n_heads // 2, 2, UpperCAmelCase_ ).transpose(1, 2 ).reshape(UpperCAmelCase_, UpperCAmelCase_ )
print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
A__ = torch.load(os.path.join(UpperCAmelCase_, "consolidated.00.pth" ), map_location="cpu" )
else:
# Sharded
A__ = [
torch.load(os.path.join(UpperCAmelCase_, F"""consolidated.{i:02d}.pth""" ), map_location="cpu" )
for i in range(UpperCAmelCase_ )
]
A__ = 0
A__ = {"weight_map": {}}
for layer_i in range(UpperCAmelCase_ ):
A__ = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
A__ = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
A__ = {
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.attention_norm.weight"""
].clone(),
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
A__ = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
for i in range(UpperCAmelCase_ )
], dim=0, ).reshape(UpperCAmelCase_, UpperCAmelCase_ ) )
A__ = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
for i in range(UpperCAmelCase_ )
], dim=0, ).reshape(UpperCAmelCase_, UpperCAmelCase_ ), UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, )
A__ = torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
for i in range(UpperCAmelCase_ )
], dim=0, ).reshape(UpperCAmelCase_, UpperCAmelCase_ )
A__ = torch.cat(
[loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(UpperCAmelCase_ )], dim=1 )
A__ = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(UpperCAmelCase_ )], dim=0 )
A__ = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(UpperCAmelCase_ )], dim=1 )
A__ = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(UpperCAmelCase_ )], dim=0 )
A__ = inv_freq
for k, v in state_dict.items():
A__ = filename
param_count += v.numel()
torch.save(UpperCAmelCase_, os.path.join(UpperCAmelCase_, UpperCAmelCase_ ) )
A__ = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
A__ = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
A__ = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(UpperCAmelCase_ )], dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(UpperCAmelCase_ )], dim=0 ),
}
for k, v in state_dict.items():
A__ = filename
param_count += v.numel()
torch.save(UpperCAmelCase_, os.path.join(UpperCAmelCase_, UpperCAmelCase_ ) )
# Write configs
A__ = {"total_size": param_count * 2}
write_json(UpperCAmelCase_, os.path.join(UpperCAmelCase_, "pytorch_model.bin.index.json" ) )
A__ = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
A__ = params["multiple_of"] if "multiple_of" in params else 256
A__ = LlamaConfig(
hidden_size=UpperCAmelCase_, intermediate_size=compute_intermediate_size(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=UpperCAmelCase_, )
config.save_pretrained(UpperCAmelCase_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
A__ = LlamaForCausalLM.from_pretrained(UpperCAmelCase_, torch_dtype=torch.floataa, low_cpu_mem_usage=UpperCAmelCase_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
model.save_pretrained(UpperCAmelCase_, safe_serialization=UpperCAmelCase_ )
shutil.rmtree(UpperCAmelCase_ )
def _lowerCamelCase ( UpperCAmelCase_ : Optional[Any], UpperCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
A__ = tokenizer_class(UpperCAmelCase_ )
tokenizer.save_pretrained(UpperCAmelCase_ )
def _lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ = argparse.ArgumentParser()
parser.add_argument(
"--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", )
parser.add_argument(
"--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"], )
parser.add_argument(
"--output_dir", help="Location to write HF model and tokenizer", )
parser.add_argument("--safe_serialization", type=UpperCAmelCase_, help="Whether or not to save using `safetensors`." )
A__ = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size ), model_size=args.model_size, safe_serialization=args.safe_serialization, )
A__ = os.path.join(args.input_dir, "tokenizer.model" )
write_tokenizer(args.output_dir, UpperCAmelCase_ )
if __name__ == "__main__":
main()
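# --- Illustrative aside (not part of the original script) ---
# Shape-level sketch of the `permute` helper defined inside write_model above: per head,
# rows stored as interleaved rotary pairs are regrouped into the half-split layout the
# HF Llama implementation expects (toy sizes below are for illustration only).
def _example_rotary_permute(w: torch.Tensor, n_heads: int) -> torch.Tensor:
    d1, d2 = w.shape
    return w.view(n_heads, d1 // n_heads // 2, 2, d2).transpose(1, 2).reshape(d1, d2)
# e.g. with n_heads=2 and an (8, hidden) weight, each head's rows [0, 1, 2, 3]
# come out reordered as [0, 2, 1, 3].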
| 562 | 0 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_a : List[Any] = True
except ImportError:
_a : int = False
try:
from torch.hub import _get_torch_home
_a : Optional[Any] = _get_torch_home()
except ImportError:
_a : Any = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
_a : Any = os.path.join(torch_cache_home, 'transformers')
_a : Dict = 'https://cdn.huggingface.co'
_a : Dict = 'https://s3.amazonaws.com/models.huggingface.co/bert'
_a : Dict = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
_a : Union[str, Any] = os.path.join(PATH, 'config.yaml')
_a : Union[str, Any] = os.path.join(PATH, 'attributes.txt')
_a : str = os.path.join(PATH, 'objects.txt')
_a : str = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
_a : int = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
_a : List[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
_a : Tuple = 'pytorch_model.bin'
_a : Optional[int] = 'config.yaml'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int=OBJECTS ,_lowerCamelCase : Dict=ATTRIBUTES ) -> Tuple:
_lowerCAmelCase : Optional[Any] = []
with open(_lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
_lowerCAmelCase : Optional[int] = []
with open(_lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[int]:
_lowerCAmelCase : List[Any] = OrderedDict()
with open(_lowerCamelCase ,"""rb""" ) as f:
_lowerCAmelCase : Any = pkl.load(_lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
_lowerCAmelCase : Union[str, Any] = ckp.pop(_lowerCamelCase )
if isinstance(_lowerCamelCase ,np.ndarray ):
_lowerCAmelCase : List[Any] = torch.tensor(_lowerCamelCase )
else:
assert isinstance(_lowerCamelCase ,torch.tensor ), type(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = v
return r
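# --- Illustrative aside (not part of the original script) ---
# Minimal sketch of the same conversion with explicit names: load a pickled
# {name: np.ndarray} checkpoint stored under a "model" key and turn every array into a
# torch tensor (the file layout is assumed to match the loader above, and torch is
# assumed importable as guarded at the top of this file).
def load_numpy_state_dict_sketch(path: str) -> OrderedDict:
    with open(path, "rb") as f:
        raw = pkl.load(f)["model"]
    return OrderedDict(
        (k, torch.tensor(v) if isinstance(v, np.ndarray) else v) for k, v in raw.items()
    )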
class __A :
_UpperCamelCase : List[Any] = {}
def __init__( self , a__ , a__ = "root" , a__=0 ):
_lowerCAmelCase : Optional[int] = name
_lowerCAmelCase : int = level
_lowerCAmelCase : Optional[Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_lowerCAmelCase : Any = copy.deepcopy(_a )
_lowerCAmelCase : Optional[int] = copy.deepcopy(_a )
if isinstance(_a , _a ):
_lowerCAmelCase : List[str] = Config(_a , name=_a , level=level + 1 )
_lowerCAmelCase : Any = v
setattr(self , _a , _a )
_lowerCAmelCase : Union[str, Any] = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , a__ , a__ ):
_lowerCAmelCase : Dict = val
_lowerCAmelCase : Optional[Any] = val
_lowerCAmelCase : List[Any] = key.split(""".""" )
_lowerCAmelCase : str = len(_a ) - 1
_lowerCAmelCase : Optional[Any] = self._pointer
if len(_a ) > 1:
for i, l in enumerate(_a ):
if hasattr(self , _a ) and isinstance(getattr(self , _a ) , _a ):
setattr(getattr(self , _a ) , """.""".join(levels[i:] ) , _a )
if l == last_level:
_lowerCAmelCase : Union[str, Any] = val
else:
_lowerCAmelCase : List[Any] = pointer[l]
def __A ( self ):
return self._pointer
def __A ( self , a__ , a__ ):
with open(F"{file_name}" , """w""" ) as stream:
dump(_a , _a )
def __A ( self , a__ , a__ ):
with open(F"{file_name}" , """w""" ) as stream:
json.dump(_a , _a )
@staticmethod
def __A ( a__ ):
with open(_a ) as stream:
_lowerCAmelCase : Optional[Any] = load(_a , Loader=_a )
return data
def __str__( self ):
_lowerCAmelCase : List[Any] = """ """
if self._name != "root":
_lowerCAmelCase : Any = F"{t * (self._level-1)}{self._name}:\n"
else:
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_a , _a ):
r += F"{t * (self._level)}{v}\n"
self._level += 1
else:
r += F"{t * (self._level)}{k}: {v} ({type(_a ).__name__})\n"
_lowerCAmelCase : Any = level
return r[:-1]
@classmethod
def __A ( cls , a__ , **a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = cls.get_config_dict(_a , **_a )
return cls(_a )
@classmethod
def __A ( cls , a__ , **a__ ):
_lowerCAmelCase : str = kwargs.pop("""cache_dir""" , _a )
_lowerCAmelCase : Optional[int] = kwargs.pop("""force_download""" , _a )
_lowerCAmelCase : Dict = kwargs.pop("""resume_download""" , _a )
_lowerCAmelCase : Union[str, Any] = kwargs.pop("""proxies""" , _a )
_lowerCAmelCase : Tuple = kwargs.pop("""local_files_only""" , _a )
if os.path.isdir(_a ):
_lowerCAmelCase : List[str] = os.path.join(_a , _a )
elif os.path.isfile(_a ) or is_remote_url(_a ):
_lowerCAmelCase : int = pretrained_model_name_or_path
else:
_lowerCAmelCase : Dict = hf_bucket_url(_a , filename=_a , use_cdn=_a )
try:
# Load from URL or cache if already cached
_lowerCAmelCase : int = cached_path(
_a , cache_dir=_a , force_download=_a , proxies=_a , resume_download=_a , local_files_only=_a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_lowerCAmelCase : List[Any] = Config.load_yaml(_a )
except EnvironmentError:
_lowerCAmelCase : List[Any] = """Can\'t load config for"""
raise EnvironmentError(_a )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(_a ), kwargs
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> Any:
_lowerCAmelCase : Optional[Any] = torch.load("""dump.pt""" ,map_location=in_tensor.device )
_lowerCAmelCase : str = in_tensor.numpy()
_lowerCAmelCase : Dict = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(_lowerCamelCase ,_lowerCamelCase ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(_lowerCamelCase ,_lowerCamelCase ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> Union[str, Any]:
_lowerCAmelCase : Optional[Any] = urlparse(_lowerCamelCase )
return parsed.scheme in ("http", "https")
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ,_lowerCamelCase : str=True ) -> Union[str, Any]:
_lowerCAmelCase : Union[str, Any] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_lowerCAmelCase : str = """/""" not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
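# --- Illustrative aside (not part of the original script) ---
# Resulting URL shapes, using the pytorch_model.bin filename defined near the top
# (the model ids here are examples only):
#   model_id without a "/" (legacy layout):  <endpoint>/bert-base-uncased-pytorch_model.bin
#   model_id with a namespace:               <endpoint>/some-org/some-model/pytorch_model.bin
# where <endpoint> is the CDN or S3 prefix selected by use_cdn.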
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Dict ,_lowerCamelCase : str=None ,_lowerCamelCase : List[Any]=0 ,_lowerCamelCase : int=None ,) -> Optional[int]:
_lowerCAmelCase : Union[str, Any] = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
ua += "; " + "; ".join("""{}/{}""".format(_lowerCamelCase ,_lowerCamelCase ) for k, v in user_agent.items() )
elif isinstance(_lowerCamelCase ,_lowerCamelCase ):
ua += "; " + user_agent
_lowerCAmelCase : Optional[Any] = {"""user-agent""": ua}
if resume_size > 0:
_lowerCAmelCase : Optional[int] = """bytes=%d-""" % (resume_size,)
_lowerCAmelCase : List[Any] = requests.get(_lowerCamelCase ,stream=_lowerCamelCase ,proxies=_lowerCamelCase ,headers=_lowerCamelCase )
if response.status_code == 416: # Range not satisfiable
return
_lowerCAmelCase : Any = response.headers.get("""Content-Length""" )
_lowerCAmelCase : str = resume_size + int(_lowerCamelCase ) if content_length is not None else None
_lowerCAmelCase : List[Any] = tqdm(
unit="""B""" ,unit_scale=_lowerCamelCase ,total=_lowerCamelCase ,initial=_lowerCamelCase ,desc="""Downloading""" ,)
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_lowerCamelCase ) )
temp_file.write(_lowerCamelCase )
progress.close()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : List[str]=None ,_lowerCamelCase : Union[str, Any]=False ,_lowerCamelCase : Optional[int]=None ,_lowerCamelCase : List[str]=10 ,_lowerCamelCase : str=False ,_lowerCamelCase : Any=None ,_lowerCamelCase : Union[str, Any]=False ,) -> List[str]:
if cache_dir is None:
_lowerCAmelCase : Dict = TRANSFORMERS_CACHE
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
_lowerCAmelCase : Dict = str(_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
_lowerCAmelCase : List[str] = None
if not local_files_only:
try:
_lowerCAmelCase : List[Any] = requests.head(_lowerCamelCase ,allow_redirects=_lowerCamelCase ,proxies=_lowerCamelCase ,timeout=_lowerCamelCase )
if response.status_code == 200:
_lowerCAmelCase : List[Any] = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_lowerCAmelCase : Union[str, Any] = url_to_filename(_lowerCamelCase ,_lowerCamelCase )
# get cache path to put the file
_lowerCAmelCase : List[str] = os.path.join(_lowerCamelCase ,_lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_lowerCamelCase ):
return cache_path
else:
_lowerCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(_lowerCamelCase ) ,filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(_lowerCamelCase ) > 0:
return os.path.join(_lowerCamelCase ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set \'local_files_only\'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(_lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_lowerCAmelCase : Union[str, Any] = cache_path + """.lock"""
with FileLock(_lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(_lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_lowerCAmelCase : List[Any] = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(_lowerCamelCase ,"""a+b""" ) as f:
yield f
_lowerCAmelCase : Tuple = _resumable_file_manager
if os.path.exists(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = os.stat(_lowerCamelCase ).st_size
else:
_lowerCAmelCase : List[Any] = 0
else:
_lowerCAmelCase : Optional[Any] = partial(tempfile.NamedTemporaryFile ,dir=_lowerCamelCase ,delete=_lowerCamelCase )
_lowerCAmelCase : Dict = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""" % (_lowerCamelCase, temp_file.name) )
http_get(
_lowerCamelCase ,_lowerCamelCase ,proxies=_lowerCamelCase ,resume_size=_lowerCamelCase ,user_agent=_lowerCamelCase ,)
os.replace(temp_file.name ,_lowerCamelCase )
_lowerCAmelCase : str = {"""url""": url, """etag""": etag}
_lowerCAmelCase : Optional[Any] = cache_path + """.json"""
with open(_lowerCamelCase ,"""w""" ) as meta_file:
json.dump(_lowerCamelCase ,_lowerCamelCase )
return cache_path
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : str=None ) -> List[str]:
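    # Build a deterministic cache filename by hashing the URL (and, when available, the ETag);
    # a ".h5" suffix is preserved so TensorFlow weight files keep their extension.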
_lowerCAmelCase : Optional[int] = url.encode("""utf-8""" )
_lowerCAmelCase : Tuple = shaaaa(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = url_hash.hexdigest()
if etag:
_lowerCAmelCase : Dict = etag.encode("""utf-8""" )
_lowerCAmelCase : int = shaaaa(_lowerCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int=None ,_lowerCamelCase : List[Any]=False ,_lowerCamelCase : List[str]=None ,_lowerCamelCase : str=False ,_lowerCamelCase : Any=None ,_lowerCamelCase : List[str]=False ,_lowerCamelCase : List[Any]=False ,_lowerCamelCase : int=False ,) -> Dict:
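    # Dispatch on the input: remote URLs go through the download cache, existing local paths are
    # returned unchanged, and zip/tar archives can optionally be extracted next to the cached file.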
if cache_dir is None:
_lowerCAmelCase : Union[str, Any] = TRANSFORMERS_CACHE
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
_lowerCAmelCase : List[str] = str(_lowerCamelCase )
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
_lowerCAmelCase : Dict = str(_lowerCamelCase )
if is_remote_url(_lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
_lowerCAmelCase : int = get_from_cache(
_lowerCamelCase ,cache_dir=_lowerCamelCase ,force_download=_lowerCamelCase ,proxies=_lowerCamelCase ,resume_download=_lowerCamelCase ,user_agent=_lowerCamelCase ,local_files_only=_lowerCamelCase ,)
elif os.path.exists(_lowerCamelCase ):
# File, and it exists.
_lowerCAmelCase : Tuple = url_or_filename
elif urlparse(_lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(_lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(_lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(_lowerCamelCase ) and not tarfile.is_tarfile(_lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_lowerCAmelCase , _lowerCAmelCase : Any = os.path.split(_lowerCamelCase )
_lowerCAmelCase : int = output_file.replace(""".""" ,"""-""" ) + """-extracted"""
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if os.path.isdir(_lowerCamelCase ) and os.listdir(_lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_lowerCAmelCase : List[Any] = output_path + """.lock"""
with FileLock(_lowerCamelCase ):
shutil.rmtree(_lowerCamelCase ,ignore_errors=_lowerCamelCase )
os.makedirs(_lowerCamelCase )
if is_zipfile(_lowerCamelCase ):
with ZipFile(_lowerCamelCase ,"""r""" ) as zip_file:
zip_file.extractall(_lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(_lowerCamelCase ):
_lowerCAmelCase : str = tarfile.open(_lowerCamelCase )
tar_file.extractall(_lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(_lowerCamelCase ) )
return output_path_extracted
return output_path
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : int="," ) -> List[Any]:
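    # Load data from a local file or over HTTP; remote content is parsed as JSON when possible,
    # otherwise decoded and either eval'ed or split into lines.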
assert isinstance(_lowerCamelCase ,_lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
with open(_lowerCamelCase ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(_lowerCamelCase )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data )
            except Exception:
                data = data.split("""\n""" )
req.close()
return data
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Union[str, Any]:
_lowerCAmelCase : Dict = requests.get(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Optional[int]:
    fn = _lowerCamelCase.split("""/""" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(_lowerCamelCase )
    with open(fn ,"""rb""" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("""model""" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            new_key = k.replace("""running_var""" ,"""num_batches_tracked""" )
            new[new_key] = zero
return new
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
print(f"{os.path.abspath(os.path.join(_lowerCamelCase ,os.pardir ) )}/demo.ipynb" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Dict="RGB" ) -> Union[str, Any]:
assert isinstance(_lowerCamelCase ,_lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
_lowerCAmelCase : List[str] = cva.imread(_lowerCamelCase )
else:
_lowerCAmelCase : Optional[int] = get_image_from_url(_lowerCamelCase )
assert img is not None, f"could not connect to: {im}"
_lowerCAmelCase : Tuple = cva.cvtColor(_lowerCamelCase ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_lowerCAmelCase : Any = img[:, :, ::-1]
return img
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any]=1 ) -> str:
return (images[i : i + batch] for i in range(0 ,len(_lowerCamelCase ) ,_lowerCamelCase ))
| 213 |
'''simple docstring'''
def lowerCAmelCase_ ( a : int ):
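    # Print Pascal's triangle: each row is left-padded with spaces so the triangle is centred.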
a__ = generate_pascal_triangle(a )
for row_idx in range(a ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def lowerCAmelCase_ ( a : int ):
if not isinstance(a , a ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(a ):
a__ = populate_current_row(a , a )
triangle.append(a )
return triangle
def lowerCAmelCase_ ( a : list[list[int]] , a : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , a ):
calculate_current_element(
a , a , a , a )
return current_row
def lowerCAmelCase_ ( a : list[list[int]] , a : list[int] , a : int , a : int , ):
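    # Each interior element is the sum of the two elements directly above it (above-left and
    # above-right) in the previous row.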
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def lowerCAmelCase_ ( a : int ):
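    # Optimized variant: rows of Pascal's triangle are symmetric, so only the first half of each
    # row is computed and then mirrored to obtain the second half.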
if not isinstance(a , a ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , a ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(a , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(a )
return result
def lowerCAmelCase_ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(a : Callable , a : int ) -> None:
a__ = f'''{func.__name__}({value})'''
a__ = timeit(f'''__main__.{call}''' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f'''{call:38} -- {timing:.4f} seconds''' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(a , a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 394 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
_snake_case = None
_snake_case = BloomTokenizerFast
_snake_case = BloomTokenizerFast
_snake_case = True
_snake_case = False
_snake_case = '''tokenizer_file'''
_snake_case = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def A__ ( self ) -> Union[str, Any]:
super().setUp()
__lowerCAmelCase = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , **snake_case_ ) -> str:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
__lowerCAmelCase = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
__lowerCAmelCase = tokenizer.batch_encode_plus(snake_case_ )["""input_ids"""]
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self , snake_case_=6 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowerCAmelCase = """This is a simple input"""
__lowerCAmelCase = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowerCAmelCase = ("""This is a simple input""", """This is a pair""")
__lowerCAmelCase = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(snake_case_ , max_length=snake_case_ )
tokenizer_r.encode_plus(snake_case_ , max_length=snake_case_ )
tokenizer_r.batch_encode_plus(snake_case_ , max_length=snake_case_ )
tokenizer_r.encode(snake_case_ , max_length=snake_case_ )
tokenizer_r.batch_encode_plus(snake_case_ , max_length=snake_case_ )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
__lowerCAmelCase = None # Hotfixing padding = None
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Simple input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Simple input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" , )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Pair input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" , )
def A__ ( self ) -> Dict:
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=snake_case_ )
__lowerCAmelCase = next(iter(snake_case_ ) )["""premise"""] # pick up one data
__lowerCAmelCase = list(sample_data.values() )
__lowerCAmelCase = list(map(tokenizer.encode , snake_case_ ) )
__lowerCAmelCase = [tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ ) for x in output_tokens]
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Union[str, Any]:
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
        # any sequence length constraint. The parent class's test would fail because it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=32 , snake_case_=3 , snake_case_=4 , snake_case_=[10, 20, 30, 40] , snake_case_=[2, 2, 3, 2] , snake_case_=True , snake_case_=True , snake_case_=37 , snake_case_="gelu" , snake_case_=10 , snake_case_=0.02 , snake_case_=["stage2", "stage3", "stage4"] , snake_case_=[2, 3, 4] , snake_case_=None , ) -> List[str]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_stages
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = initializer_range
__lowerCAmelCase = out_features
__lowerCAmelCase = out_indices
__lowerCAmelCase = scope
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> List[Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=snake_case_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
__lowerCAmelCase = ConvNextModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(snake_case_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
__lowerCAmelCase = ConvNextForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
__lowerCAmelCase = ConvNextBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(snake_case_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowerCAmelCase = None
__lowerCAmelCase = ConvNextBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_snake_case = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> int:
__lowerCAmelCase = ConvNextModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def A__ ( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def A__ ( self ) -> str:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def A__ ( self ) -> List[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def A__ ( self ) -> Optional[int]:
pass
def A__ ( self ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def A__ ( self ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case_ )
def A__ ( self ) -> Union[str, Any]:
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def A__ ( self ) -> str:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = ConvNextModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowercase ():
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ) -> Any:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Any:
__lowerCAmelCase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(snake_case_ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**snake_case_ )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
__lowerCAmelCase = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase , BackboneTesterMixin ):
'''simple docstring'''
_snake_case = (ConvNextBackbone,) if is_torch_available() else ()
_snake_case = ConvNextConfig
_snake_case = False
def A__ ( self ) -> Dict:
__lowerCAmelCase = ConvNextModelTester(self )
| 573 | 0 |
'''simple docstring'''
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = 0 ) -> List[Any]:
"""simple docstring"""
__snake_case : int = right or len(_lowerCamelCase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(_lowerCamelCase , _lowerCamelCase , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 | def __UpperCamelCase ( A ):
if len(A ) < 2:
return collection
def circle_sort_util(A , A , A ) -> bool:
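        # Circle sort pass: compare and swap elements from the two ends of the range moving inward,
        # then recurse on the left and right halves; returns True if any swap happened.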
UpperCamelCase__ = False
if low == high:
return swapped
UpperCamelCase__ = low
UpperCamelCase__ = high
while left < right:
if collection[left] > collection[right]:
UpperCamelCase__ , UpperCamelCase__ = (
collection[right],
collection[left],
)
UpperCamelCase__ = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
UpperCamelCase__ , UpperCamelCase__ = (
collection[right + 1],
collection[left],
)
UpperCamelCase__ = True
UpperCamelCase__ = low + int((high - low) / 2 )
UpperCamelCase__ = circle_sort_util(A , A , A )
UpperCamelCase__ = circle_sort_util(A , mid + 1 , A )
return swapped or left_swap or right_swap
UpperCamelCase__ = True
while is_not_sorted is True:
UpperCamelCase__ = circle_sort_util(A , 0 , len(A ) - 1 )
return collection
if __name__ == "__main__":
__magic_name__ =input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ =[int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 415 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( A_, A_ ):
'''simple docstring'''
assert isinstance(A_, A_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""", [False, True] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read()
_check_text_dataset(A_, A_ )
@pytest.mark.parametrize(
"""features""", [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
], )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read()
_check_text_dataset(A_, A_ )
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
__magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read()
_check_text_dataset(A_, A_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""", [str, list] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
if issubclass(A_, A_ ):
__magic_name__ = text_path
elif issubclass(A_, A_ ):
__magic_name__ = [text_path]
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
__magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read()
_check_text_dataset(A_, A_ )
def a__ ( A_, A_, A_=("train",) ):
'''simple docstring'''
assert isinstance(A_, A_ )
for split in splits:
__magic_name__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""", [False, True] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read()
_check_text_datasetdict(A_, A_ )
@pytest.mark.parametrize(
"""features""", [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
], )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
    # The text reader produces a single "text" column whose default dtype is string
__magic_name__ = {"""text""": """string"""}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read()
_check_text_datasetdict(A_, A_ )
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
if split:
__magic_name__ = {split: text_path}
else:
__magic_name__ = """train"""
__magic_name__ = {"""train""": text_path, """test""": text_path}
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
__magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read()
_check_text_datasetdict(A_, A_, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 718 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
a__ = FunnelTokenizer
a__ = FunnelTokenizerFast
a__ = True
a__ = True
def _lowercase ( self : List[Any] ) -> str:
"""simple docstring"""
super().setUp()
__magic_name__ = [
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _lowercase ( self : Dict , **UpperCamelCase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self : str , **UpperCamelCase__ : str ) -> List[str]:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[Any]:
"""simple docstring"""
__magic_name__ = """UNwant\u00E9d,running"""
__magic_name__ = """unwanted, running"""
return input_text, output_text
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.tokenizer_class(self.vocab_file )
__magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
__magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
__magic_name__ = tokenizer("""UNwant\u00E9d,running""" )
__magic_name__ = len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )
__magic_name__ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
| 76 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] ={
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class _A ( PretrainedConfig ):
snake_case__ : Dict = 'ibert'
def __init__( self , __lowerCAmelCase=3_0522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase="absolute" , __lowerCAmelCase=False , __lowerCAmelCase="none" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = quant_mode
lowercase = force_dequant
class _A ( OnnxConfig ):
@property
def A__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 359 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int]="attention" ) -> str:
'''simple docstring'''
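    # Fetch the key/out/query/value projection kernels of one attention block from the flattened
    # T5X parameter dictionary.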
lowercase = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
lowercase = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
lowercase = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
lowercase = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]=False ) -> str:
'''simple docstring'''
if split_mlp_wi:
lowercase = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
lowercase = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
lowercase = (wi_a, wi_a)
else:
lowercase = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
lowercase = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> Any:
'''simple docstring'''
return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def UpperCAmelCase__ ( lowerCAmelCase__ :dict , *, lowerCAmelCase__ :int , lowerCAmelCase__ :bool ) -> List[str]:
'''simple docstring'''
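    # Walk the flattened T5X checkpoint and remap every encoder (and, unless encoder-only, decoder)
    # block to the corresponding Hugging Face T5 parameter name, transposing the kernels into
    # PyTorch's expected layout.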
lowercase = traverse_util.flatten_dict(variables["""target"""] )
lowercase = {"""/""".join(lowerCAmelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowerCAmelCase__ )
lowercase = collections.OrderedDict()
# Shared embeddings.
lowercase = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (MLP).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , lowerCAmelCase__ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
lowercase = old[
"""encoder/relpos_bias/rel_embedding"""
].T
lowercase = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """self_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (Cross Attention).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """encoder_decoder_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 2 (MLP).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , lowerCAmelCase__ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
lowercase = old["""decoder/decoder_norm/scale"""]
lowercase = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase = old["""decoder/logits_dense/kernel"""].T
return new
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :bool ) -> int:
'''simple docstring'''
lowercase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowercase = state_dict["""shared.weight"""]
return state_dict
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = checkpoints.load_tax_checkpoint(lowerCAmelCase__ )
lowercase = convert_tax_to_pytorch(lowerCAmelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase__ )
lowercase = make_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :bool = False ) -> Tuple:
'''simple docstring'''
lowercase = TaConfig.from_json_file(lowerCAmelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase = TaEncoderModel(lowerCAmelCase__ )
else:
lowercase = TaForConditionalGeneration(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase__ )
print("""Done""" )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] =argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 359 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Dict=False , SCREAMING_SNAKE_CASE_: Dict=False ) -> Optional[int]:
'''simple docstring'''
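    # Build the list of (old key, new key) pairs that map the original BEiT/DiT checkpoint names
    # onto the Hugging Face BEiT naming scheme; LM-head checkpoints additionally keep the mask
    # token and final layernorm, while classification checkpoints keep the pooler and head.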
A__ = "backbone." if is_semantic else ""
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'{prefix}blocks.{i}.norm1.weight', F'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm1.bias', F'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.weight', F'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.bias', F'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.weight', F'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.bias', F'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.weight', F'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.bias', F'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.weight', F'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.bias', F'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'{prefix}cls_token', "beit.embeddings.cls_token"),
(F'{prefix}patch_embed.proj.weight', "beit.embeddings.patch_embeddings.projection.weight"),
(F'{prefix}patch_embed.proj.bias', "beit.embeddings.patch_embeddings.projection.bias"),
(F'{prefix}pos_embed', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int]=False , SCREAMING_SNAKE_CASE_: int=False ) -> Optional[int]:
'''simple docstring'''
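    # The original checkpoint stores the attention projection as one fused qkv matrix; slice it
    # into separate query/key/value weights (plus q and v biases) as expected by the HF BEiT model.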
for i in range(config.num_hidden_layers ):
A__ = "backbone." if is_semantic else ""
# queries, keys and values
A__ = state_dict.pop(F'{prefix}blocks.{i}.attn.qkv.weight' )
A__ = state_dict.pop(F'{prefix}blocks.{i}.attn.q_bias' )
A__ = state_dict.pop(F'{prefix}blocks.{i}.attn.v_bias' )
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = q_bias
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A__ = state_dict.pop(F'{prefix}blocks.{i}.gamma_1' )
A__ = state_dict.pop(F'{prefix}blocks.{i}.gamma_2' )
A__ = gamma_a
A__ = gamma_a
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ = dct.pop(SCREAMING_SNAKE_CASE_ )
A__ = val
def lowerCAmelCase__ ( ) -> int:
'''simple docstring'''
A__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: Any=False ) -> Dict:
'''simple docstring'''
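    # DiT checkpoints fine-tuned on RVL-CDIP are document classifiers; every other checkpoint is
    # self-supervised and keeps the masked-image-modeling (LM) head.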
A__ = False if "rvlcdip" in checkpoint_url else True
A__ = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE_ , use_mask_token=SCREAMING_SNAKE_CASE_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A__ = 1_0_2_4
A__ = 4_0_9_6
A__ = 2_4
A__ = 1_6
# labels
if "rvlcdip" in checkpoint_url:
A__ = 1_6
A__ = "huggingface/label-files"
A__ = "rvlcdip-id2label.json"
A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
A__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model"]
A__ = create_rename_keys(SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
A__ = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE_ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image
A__ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ )
A__ = prepare_img()
A__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
A__ = encoding["pixel_values"]
A__ = model(SCREAMING_SNAKE_CASE_ )
A__ = outputs.logits
# verify logits
A__ = [1, 1_6] if "rvlcdip" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE_ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
if has_lm_head:
A__ = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
A__ = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
lowerCAmelCase__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 626 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=0.9 , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
A__ = size if size is not None else {"shortest_edge": 30}
A__ = crop_size if crop_size is not None else {"height": 30, "width": 30}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize_and_center_crop
A__ = size
A__ = crop_pct
A__ = crop_size
A__ = do_normalize
A__ = image_mean
A__ = image_std
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
self.assertTrue(hasattr(lowercase , "crop_pct" ) )
self.assertTrue(hasattr(lowercase , "do_normalize" ) )
self.assertTrue(hasattr(lowercase , "image_mean" ) )
self.assertTrue(hasattr(lowercase , "image_std" ) )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 626 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class _lowercase :
def __init__( self , a , ):
snake_case__ : Optional[int] =parent
snake_case__ : List[str] =1_3
snake_case__ : Dict =7
snake_case__ : List[Any] =True
snake_case__ : Optional[int] =True
snake_case__ : str =True
snake_case__ : str =True
snake_case__ : List[Any] =True
snake_case__ : Tuple =False
snake_case__ : int =False
snake_case__ : List[Any] =False
snake_case__ : List[Any] =2
snake_case__ : Optional[int] =9_9
snake_case__ : Any =0
snake_case__ : int =3_2
snake_case__ : List[str] =2
snake_case__ : Union[str, Any] =4
snake_case__ : int =0.1
snake_case__ : Dict =0.1
snake_case__ : List[Any] =5_1_2
snake_case__ : int =1_6
snake_case__ : Any =2
snake_case__ : Dict =0.02
snake_case__ : str =3
snake_case__ : List[str] =4
snake_case__ : List[Any] ="""last"""
snake_case__ : List[Any] =True
snake_case__ : Optional[Any] =None
snake_case__ : List[Any] =0
def lowercase__ ( self ):
snake_case__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Tuple =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
snake_case__ : int =None
if self.use_input_lengths:
snake_case__ : Tuple =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case__ : Optional[Any] =None
if self.use_token_type_ids:
snake_case__ : int =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case__ : Tuple =None
snake_case__ : Union[str, Any] =None
snake_case__ : Tuple =None
if self.use_labels:
snake_case__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : List[str] =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
snake_case__ : Optional[int] =ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : int =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self , a , a , a , a , a , a , a , a , a , ):
snake_case__ : Union[str, Any] =TFFlaubertModel(config=a )
snake_case__ : str ={"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
snake_case__ : Dict =model(a )
snake_case__ : Optional[Any] =[input_ids, input_mask]
snake_case__ : str =model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , a , a , a , a , a , a , a , a , a , ):
snake_case__ : Union[str, Any] =TFFlaubertWithLMHeadModel(a )
snake_case__ : Any ={"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
snake_case__ : Optional[int] =model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , a , a , a , a , a , a , a , a , a , ):
snake_case__ : Tuple =TFFlaubertForQuestionAnsweringSimple(a )
snake_case__ : int ={"""input_ids""": input_ids, """lengths""": input_lengths}
snake_case__ : Dict =model(a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self , a , a , a , a , a , a , a , a , a , ):
snake_case__ : Dict =TFFlaubertForSequenceClassification(a )
snake_case__ : Optional[int] ={"""input_ids""": input_ids, """lengths""": input_lengths}
snake_case__ : Optional[Any] =model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self , a , a , a , a , a , a , a , a , a , ):
snake_case__ : Tuple =self.num_labels
snake_case__ : List[Any] =TFFlaubertForTokenClassification(config=a )
snake_case__ : Any ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ : List[str] =model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self , a , a , a , a , a , a , a , a , a , ):
snake_case__ : List[Any] =self.num_choices
snake_case__ : Optional[int] =TFFlaubertForMultipleChoice(config=a )
snake_case__ : List[Any] =tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
snake_case__ : List[str] =tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
snake_case__ : str =tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
snake_case__ : List[Any] ={
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case__ : Optional[Any] =model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class _lowercase ( _A , _A , unittest.TestCase ):
_a : Optional[Any] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_a : Tuple = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_a : Optional[Any] = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_a : Union[str, Any] = False
_a : Dict = False
def lowercase__ ( self , a , a , a , a , a ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self ):
snake_case__ : Tuple =TFFlaubertModelTester(self )
snake_case__ : int =ConfigTester(self , config_class=a , emb_dim=3_7 )
def lowercase__ ( self ):
self.config_tester.run_common_tests()
def lowercase__ ( self ):
snake_case__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a )
def lowercase__ ( self ):
snake_case__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a )
def lowercase__ ( self ):
snake_case__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a )
def lowercase__ ( self ):
snake_case__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a )
def lowercase__ ( self ):
snake_case__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*a )
def lowercase__ ( self ):
snake_case__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*a )
@slow
def lowercase__ ( self ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Tuple =TFFlaubertModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
snake_case__ : str =TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
snake_case__ : Union[str, Any] =tf.convert_to_tensor(
[[0, 1_5_8, 7_3_5, 2_5_9_2, 1_4_2_4, 6_7_2_7, 8_2, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
snake_case__ : Tuple =model(a )[0]
snake_case__ : List[Any] =tf.TensorShape((1, 8, 5_1_2) )
self.assertEqual(output.shape , a )
# compare the actual values for a slice.
snake_case__ : Dict =tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 385 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(
_A , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class _lowercase ( _A ):
def lowercase__ ( self , a ):
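        # Locate the position(s) of the mask token in the input ids, via tf.where or torch.nonzero depending on the framework.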
if self.framework == "tf":
snake_case__ : int =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
snake_case__ : Optional[Any] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=a )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def lowercase__ ( self , a ):
snake_case__ : str =self.get_masked_index(a )
snake_case__ : Any =np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def lowercase__ ( self , a ):
if isinstance(a , a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(a )
def lowercase__ ( self , a , a=None , **a ):
if return_tensors is None:
snake_case__ : Optional[Any] =self.framework
snake_case__ : List[str] =self.tokenizer(a , return_tensors=a )
self.ensure_exactly_one_mask_token(a )
return model_inputs
def lowercase__ ( self , a ):
snake_case__ : Optional[Any] =self.model(**a )
snake_case__ : str =model_inputs["""input_ids"""]
return model_outputs
def lowercase__ ( self , a , a=5 , a=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
snake_case__ : Union[str, Any] =target_ids.shape[0]
snake_case__ : Union[str, Any] =model_outputs["""input_ids"""][0]
snake_case__ : List[Any] =model_outputs["""logits"""]
if self.framework == "tf":
snake_case__ : str =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
snake_case__ : Any =outputs.numpy()
snake_case__ : Optional[Any] =outputs[0, masked_index, :]
snake_case__ : List[Any] =stable_softmax(a , axis=-1 )
if target_ids is not None:
snake_case__ : str =tf.gather_nd(tf.squeeze(a , 0 ) , target_ids.reshape(-1 , 1 ) )
snake_case__ : List[str] =tf.expand_dims(a , 0 )
snake_case__ : Optional[Any] =tf.math.top_k(a , k=a )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
else:
snake_case__ : List[Any] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
snake_case__ : int =outputs[0, masked_index, :]
snake_case__ : Optional[int] =logits.softmax(dim=-1 )
if target_ids is not None:
snake_case__ : Dict =probs[..., target_ids]
            values , predictions = probs.topk(a )
snake_case__ : List[Any] =[]
snake_case__ : int =values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
snake_case__ : Dict =[]
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
snake_case__ : List[Any] =input_ids.numpy().copy()
if target_ids is not None:
snake_case__ : Tuple =target_ids[p].tolist()
snake_case__ : Any =p
# Filter padding out:
snake_case__ : int =tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
snake_case__ : Union[str, Any] =self.tokenizer.decode(a , skip_special_tokens=a )
snake_case__ : int ={"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(a )
result.append(a )
if single_mask:
return result[0]
return result
def lowercase__ ( self , a , a=None ):
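        # Resolve each target word to a vocabulary id; words missing from the vocab are tokenized and their first sub-token is used, with a warning.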
if isinstance(a , a ):
snake_case__ : Tuple =[targets]
try:
snake_case__ : Any =self.tokenizer.get_vocab()
except Exception:
snake_case__ : List[Any] ={}
snake_case__ : Any =[]
for target in targets:
snake_case__ : Optional[int] =vocab.get(a , a )
if id_ is None:
snake_case__ : str =self.tokenizer(
a , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , max_length=1 , truncation=a , )["""input_ids"""]
if len(a ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
snake_case__ : Any =input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
snake_case__ : Optional[Any] =list(set(a ) )
if len(a ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
snake_case__ : Tuple =np.array(a )
return target_ids
def lowercase__ ( self , a=None , a=None ):
snake_case__ : int ={}
if targets is not None:
snake_case__ : str =self.get_target_ids(a , a )
snake_case__ : Union[str, Any] =target_ids
if top_k is not None:
snake_case__ : Dict =top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self , a , *a , **a ):
snake_case__ : List[Any] =super().__call__(a , **a )
if isinstance(a , a ) and len(a ) == 1:
return outputs[0]
return outputs
| 385 | 1 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def _lowerCamelCase ( *lowerCamelCase__ : Optional[int] ):
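    # Release references to the passed objects and empty the accelerator cache (XPU, NPU, or CUDA).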
    # Work on a list copy of the varargs so each reference can be cleared in place.
    objects = list(lowerCamelCase__ )
    for i in range(len(objects ) ):
        objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size( exception : Exception ):
    _statements = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def find_executable_batch_size( function : callable = None , starting_batch_size : int = 1_28 ):
    # Retries `function`, halving the batch size after every out-of-memory error, until it runs or the size reaches zero.
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args : int , **kwargs : int ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
# Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
                return function(batch_size , *args , **kwargs )
except Exception as e:
                if should_reduce_batch_size(e ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
    return decorator
| 128 |
"""simple docstring"""
import socket
def main():
    # Plain TCP client: connect to this machine on port 12312 and save the server's reply to `Received_file`.
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_23_12
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
            data = sock.recv(10_24 )
if not data:
break
            out_file.write(data )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
    main()
| 128 | 1 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def _snake_case ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
    def _snake_case ( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ) -> Optional[int]:
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 18 |
"""simple docstring"""
def longest_common_subsequence( x, y ):
"""simple docstring"""
assert x is not None
assert y is not None
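    # Classic dynamic programming: l[i][j] holds the LCS length of x[:i] and y[:j]; the subsequence is rebuilt by backtracking.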
    m = len(x )
    n = len(y )
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741
    for i in range(1, m + 1 ):
        for j in range(1, n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match )
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
if __name__ == "__main__":
    a = 'AGGTAB'
    b = 'GXTXAYB'
    expected_ln = 4
    expected_subseq = 'GTAB'
    ln, subseq = longest_common_subsequence(a, b)
    print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
| 575 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args() -> Any:
    """simple docstring"""
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
    parser.add_argument(
        """training_script""" , type=str , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
return parser.parse_args()
def main() -> Optional[int]:
    """simple docstring"""
    args = parse_args()
# Import training_script as a module.
    script_fpath = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
# Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 707 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
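        # Test fixtures: a tiny vocab plus tokenizer and feature-extractor configs written to a temp dir; the beam-search decoder comes from the Hub.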
UpperCamelCase_ : str = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
UpperCamelCase_ : int = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
UpperCamelCase_ : Tuple = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
UpperCamelCase_ : Optional[Any] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
UpperCamelCase_ : int = tempfile.mkdtemp()
UpperCamelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_ : List[Any] = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
# load decoder from hub
UpperCamelCase_ : Union[str, Any] = """hf-internal-testing/ngram-beam-search-decoder"""
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
UpperCamelCase_ : str = self.add_kwargs_tokens_map.copy()
kwargs.update(__lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__lowerCAmelCase )
def _UpperCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase_ : str = self.get_feature_extractor()
UpperCamelCase_ : Tuple = self.get_decoder()
UpperCamelCase_ : int = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCamelCase_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Tuple = self.get_feature_extractor()
UpperCamelCase_ : Tuple = self.get_tokenizer()
UpperCamelCase_ : Any = self.get_decoder()
UpperCamelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
UpperCamelCase_ : List[Any] = floats_list((3, 10_00) )
UpperCamelCase_ : Tuple = feature_extractor(__lowerCAmelCase , return_tensors="""np""" )
UpperCamelCase_ : str = processor(__lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Dict = self.get_feature_extractor()
UpperCamelCase_ : List[Any] = self.get_tokenizer()
UpperCamelCase_ : List[Any] = self.get_decoder()
UpperCamelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
UpperCamelCase_ : List[str] = """This is a test string"""
UpperCamelCase_ : Optional[Any] = processor(text=__lowerCAmelCase )
UpperCamelCase_ : int = tokenizer(__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCAmelCase ( self , __lowerCAmelCase=(2, 10, 16) , __lowerCAmelCase=77 ):
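        # Dummy logits helper: seed NumPy and draw uniform random values of the requested shape.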
np.random.seed(__lowerCAmelCase )
return np.random.rand(*__lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = self.get_feature_extractor()
UpperCamelCase_ : Tuple = self.get_tokenizer()
UpperCamelCase_ : Optional[int] = self.get_decoder()
UpperCamelCase_ : int = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
UpperCamelCase_ : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
UpperCamelCase_ : Any = processor.decode(__lowerCAmelCase )
UpperCamelCase_ : Any = decoder.decode_beams(__lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def _UpperCAmelCase ( self , __lowerCAmelCase ):
UpperCamelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCamelCase_ : str = self.get_tokenizer()
UpperCamelCase_ : List[Any] = self.get_decoder()
UpperCamelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCamelCase_ : List[Any] = processor.batch_decode(__lowerCAmelCase )
else:
with get_context(__lowerCAmelCase ).Pool() as pool:
UpperCamelCase_ : Any = processor.batch_decode(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase_ : Tuple = list(__lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
UpperCamelCase_ : Optional[int] = decoder.decode_beams_batch(__lowerCAmelCase , __lowerCAmelCase )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(__lowerCAmelCase , decoded_processor.lm_score )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Dict = self.get_feature_extractor()
UpperCamelCase_ : Tuple = self.get_tokenizer()
UpperCamelCase_ : Tuple = self.get_decoder()
UpperCamelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
UpperCamelCase_ : List[str] = self._get_dummy_logits()
UpperCamelCase_ : Dict = 15
UpperCamelCase_ : str = -20.0
UpperCamelCase_ : Dict = -4.0
UpperCamelCase_ : Union[str, Any] = processor.batch_decode(
__lowerCAmelCase , beam_width=__lowerCAmelCase , beam_prune_logp=__lowerCAmelCase , token_min_logp=__lowerCAmelCase , )
UpperCamelCase_ : Any = decoded_processor_out.text
UpperCamelCase_ : Tuple = list(__lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
UpperCamelCase_ : str = decoder.decode_beams_batch(
__lowerCAmelCase , __lowerCAmelCase , beam_width=__lowerCAmelCase , beam_prune_logp=__lowerCAmelCase , token_min_logp=__lowerCAmelCase , )
UpperCamelCase_ : str = [d[0][0] for d in decoded_decoder_out]
UpperCamelCase_ : List[str] = [d[0][2] for d in decoded_decoder_out]
UpperCamelCase_ : Union[str, Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __lowerCAmelCase )
self.assertTrue(np.array_equal(__lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , __lowerCAmelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(__lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , __lowerCAmelCase , atol=1E-3 ) )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[int] = self.get_feature_extractor()
UpperCamelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase_ : int = self.get_decoder()
UpperCamelCase_ : Dict = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
UpperCamelCase_ : str = self._get_dummy_logits()
UpperCamelCase_ : Optional[int] = 2.0
UpperCamelCase_ : List[str] = 5.0
UpperCamelCase_ : Optional[Any] = -20.0
UpperCamelCase_ : Optional[Any] = True
UpperCamelCase_ : Union[str, Any] = processor.batch_decode(
__lowerCAmelCase , alpha=__lowerCAmelCase , beta=__lowerCAmelCase , unk_score_offset=__lowerCAmelCase , lm_score_boundary=__lowerCAmelCase , )
UpperCamelCase_ : List[str] = decoded_processor_out.text
UpperCamelCase_ : List[str] = list(__lowerCAmelCase )
decoder.reset_params(
alpha=__lowerCAmelCase , beta=__lowerCAmelCase , unk_score_offset=__lowerCAmelCase , lm_score_boundary=__lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
UpperCamelCase_ : int = decoder.decode_beams_batch(
__lowerCAmelCase , __lowerCAmelCase , )
UpperCamelCase_ : Any = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCamelCase_ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
UpperCamelCase_ : int = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
UpperCamelCase_ : Any = os.listdir(__lowerCAmelCase )
UpperCamelCase_ : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
UpperCamelCase_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCamelCase_ : Tuple = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
UpperCamelCase_ : Union[str, Any] = os.listdir(__lowerCAmelCase )
UpperCamelCase_ : str = os.listdir(__lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCamelCase_ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCamelCase_ : Dict = floats_list((3, 10_00) )
UpperCamelCase_ : List[Any] = processor_wavaveca(__lowerCAmelCase , return_tensors="""np""" )
UpperCamelCase_ : Tuple = processor_auto(__lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
UpperCamelCase_ : Optional[int] = self._get_dummy_logits()
UpperCamelCase_ : Dict = processor_wavaveca.batch_decode(__lowerCAmelCase )
UpperCamelCase_ : Any = processor_auto.batch_decode(__lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Tuple = self.get_feature_extractor()
UpperCamelCase_ : int = self.get_tokenizer()
UpperCamelCase_ : List[Any] = self.get_decoder()
UpperCamelCase_ : List[str] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def _UpperCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase_ : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCamelCase_ : int = self._get_dummy_logits()[0]
UpperCamelCase_ : List[Any] = processor.decode(__lowerCAmelCase , output_word_offsets=__lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCamelCase_ : Union[str, Any] = self._get_dummy_logits()
UpperCamelCase_ : Dict = processor.batch_decode(__lowerCAmelCase , output_word_offsets=__lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase ( self ):
import torch
UpperCamelCase_ : str = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
UpperCamelCase_ : Union[str, Any] = iter(__lowerCAmelCase )
UpperCamelCase_ : int = next(__lowerCAmelCase )
UpperCamelCase_ : List[str] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
UpperCamelCase_ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCamelCase_ : Dict = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
UpperCamelCase_ : List[Any] = model(__lowerCAmelCase ).logits.cpu().numpy()
UpperCamelCase_ : Tuple = processor.decode(logits[0] , output_word_offsets=__lowerCAmelCase )
UpperCamelCase_ : List[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCamelCase_ : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
UpperCamelCase_ : Optional[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__lowerCAmelCase , """word""" ) ) , __lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(__lowerCAmelCase , """word""" ) ) , output.text )
# output times
UpperCamelCase_ : str = torch.tensor(self.get_from_offsets(__lowerCAmelCase , """start_time""" ) )
UpperCamelCase_ : Union[str, Any] = torch.tensor(self.get_from_offsets(__lowerCAmelCase , """end_time""" ) )
# fmt: off
UpperCamelCase_ : Union[str, Any] = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
UpperCamelCase_ : Union[str, Any] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=0.01 ) )
| 543 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase ( A__: Optional[Any] , A__: Optional[Any]=0.999 , A__: Dict="cosine" , ) -> Optional[Any]:
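    # Build a discrete beta schedule from a continuous alpha_bar function ("cosine" or "exp"), capping each beta at the given maximum.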
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__: str ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__: Union[str, Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__lowerCamelCase : Any = []
for i in range(A__ ):
__lowerCamelCase : str = i / num_diffusion_timesteps
__lowerCamelCase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class __lowercase( lowercase__ , lowercase__ ):
'''simple docstring'''
__a : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
__a : List[Any] = 2
@register_to_config
def __init__( self , __a = 1000 , __a = 0.00_085 , __a = 0.012 , __a = "linear" , __a = None , __a = "epsilon" , __a = "linspace" , __a = 0 , ):
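        # Build the beta schedule (user-provided, linear, scaled_linear, or squaredcos_cap_v2), then cache alphas and their cumulative product.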
if trained_betas is not None:
__lowerCamelCase : str = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase : List[Any] = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase : Dict = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase : str = betas_for_alpha_bar(__a )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__lowerCamelCase : Any = 1.0 - self.betas
__lowerCamelCase : Any = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
def snake_case_ ( self , __a , __a=None ):
if schedule_timesteps is None:
__lowerCamelCase : Optional[int] = self.timesteps
__lowerCamelCase : Tuple = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase : Optional[int] = 1 if len(__a ) > 1 else 0
else:
__lowerCamelCase : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
__lowerCamelCase : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def snake_case_ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def snake_case_ ( self , __a , __a , ):
__lowerCamelCase : List[Any] = self.index_for_timestep(__a )
if self.state_in_first_order:
__lowerCamelCase : Optional[int] = self.sigmas[step_index]
else:
__lowerCamelCase : Any = self.sigmas_interpol[step_index]
__lowerCamelCase : List[str] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def snake_case_ ( self , __a , __a = None , __a = None , ):
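        # Build the timestep grid ("linspace", "leading", or "trailing"), the matching sigmas, and their log-space interpolation used by the second-order stage.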
__lowerCamelCase : List[str] = num_inference_steps
__lowerCamelCase : str = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase : List[str] = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase : Dict = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase : Optional[int] = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase : Optional[int] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase : Union[str, Any] = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__lowerCamelCase : Optional[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase : List[Any] = torch.from_numpy(np.log(__a ) ).to(__a )
__lowerCamelCase : Union[str, Any] = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
__lowerCamelCase : List[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a )
# interpolate sigmas
__lowerCamelCase : List[Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase : Optional[int] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase : int = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__a ).startswith('mps' ):
# mps does not support float64
__lowerCamelCase : Union[str, Any] = torch.from_numpy(__a ).to(__a , dtype=torch.floataa )
else:
__lowerCamelCase : Optional[int] = torch.from_numpy(__a ).to(__a )
# interpolate timesteps
__lowerCamelCase : List[Any] = self.sigma_to_t(__a ).to(__a , dtype=timesteps.dtype )
__lowerCamelCase : Tuple = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase : Any = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase : int = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase : Dict = defaultdict(__a )
def snake_case_ ( self , __a ):
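        # Map sigmas back to fractional timesteps by interpolating between neighbouring log-sigmas.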
# get log sigma
__lowerCamelCase : List[str] = sigma.log()
# get distribution
__lowerCamelCase : Any = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase : Optional[Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase : int = low_idx + 1
__lowerCamelCase : List[Any] = self.log_sigmas[low_idx]
__lowerCamelCase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase : List[Any] = (low - log_sigma) / (low - high)
__lowerCamelCase : List[Any] = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase : Dict = (1 - w) * low_idx + w * high_idx
__lowerCamelCase : Dict = t.view(sigma.shape )
return t
@property
def snake_case_ ( self ):
return self.sample is None
def snake_case_ ( self , __a , __a , __a , __a = True , ):
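        # One KDPM2-style solver step: predict x_0, convert it to an ODE derivative, and advance the sample by the sigma difference (first- or second-order stage).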
__lowerCamelCase : Union[str, Any] = self.index_for_timestep(__a )
# advance index counter by 1
__lowerCamelCase : Optional[Any] = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase : Any = self.sigmas[step_index]
__lowerCamelCase : Dict = self.sigmas_interpol[step_index + 1]
__lowerCamelCase : Any = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase : List[str] = self.sigmas[step_index - 1]
__lowerCamelCase : Dict = self.sigmas_interpol[step_index]
__lowerCamelCase : List[str] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase : Any = 0
__lowerCamelCase : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase : Dict = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase : Union[str, Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase : str = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase : List[str] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase : List[str] = sigma_next - sigma_hat
__lowerCamelCase : int = self.sample
__lowerCamelCase : Dict = None
__lowerCamelCase : Any = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def snake_case_ ( self , __a , __a , __a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase : List[str] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
__lowerCamelCase : Any = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCamelCase : int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCamelCase : Union[str, Any] = self.timesteps.to(original_samples.device )
__lowerCamelCase : Union[str, Any] = timesteps.to(original_samples.device )
__lowerCamelCase : Tuple = [self.index_for_timestep(__a , __a ) for t in timesteps]
__lowerCamelCase : List[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase : List[str] = sigma.unsqueeze(-1 )
__lowerCamelCase : Tuple = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 594 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase( lowercase__ ):
'''simple docstring'''
__a : int = (DDPMParallelScheduler,)
def snake_case_ ( self , **__a ):
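        # Default scheduler config for these tests; keyword arguments override individual entries.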
        config = {
'num_train_timesteps': 1000,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**__a )
return config
def snake_case_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def snake_case_ ( self ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def snake_case_ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__a )
def snake_case_ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__a )
def snake_case_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__a )
def snake_case_ ( self ):
self.check_over_configs(thresholding=__a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__a , prediction_type=__a , sample_max_value=__a , )
def snake_case_ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def snake_case_ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=__a )
def snake_case_ ( self ):
__lowerCamelCase : Dict = self.scheduler_classes[0]
__lowerCamelCase : Any = self.get_scheduler_config()
__lowerCamelCase : Any = scheduler_class(**__a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def snake_case_ ( self ):
__lowerCamelCase : Tuple = self.scheduler_classes[0]
__lowerCamelCase : Optional[int] = self.get_scheduler_config()
__lowerCamelCase : str = scheduler_class(**__a )
__lowerCamelCase : Any = len(__a )
__lowerCamelCase : Dict = self.dummy_model()
__lowerCamelCase : Optional[Any] = self.dummy_sample_deter
__lowerCamelCase : List[Any] = self.dummy_sample_deter + 0.1
__lowerCamelCase : Any = self.dummy_sample_deter - 0.1
__lowerCamelCase : Optional[Any] = samplea.shape[0]
__lowerCamelCase : Any = torch.stack([samplea, samplea, samplea] , dim=0 )
__lowerCamelCase : Optional[int] = torch.arange(__a )[0:3, None].repeat(1 , __a )
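        # shape note: the stacked samples are (3, batch, C, H, W) and the timesteps are
        # (3, batch); both are flattened over their first two dimensions so the
        # batched step call below sees a single batch of 3 * batch elements.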
__lowerCamelCase : Dict = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__lowerCamelCase : Dict = scheduler.batch_step_no_noise(__a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
__lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
__lowerCamelCase : Dict = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2
assert abs(result_mean.item() - 0.5_005 ) < 1E-3
def snake_case_ ( self ):
__lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
__lowerCamelCase : List[Any] = self.get_scheduler_config()
__lowerCamelCase : Union[str, Any] = scheduler_class(**__a )
__lowerCamelCase : Optional[Any] = len(__a )
__lowerCamelCase : int = self.dummy_model()
__lowerCamelCase : Dict = self.dummy_sample_deter
__lowerCamelCase : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(__a ) ):
# 1. predict noise residual
__lowerCamelCase : List[str] = model(__a , __a )
# 2. predict previous mean of sample x_t-1
__lowerCamelCase : List[str] = scheduler.step(__a , __a , __a , generator=__a ).prev_sample
__lowerCamelCase : int = pred_prev_sample
__lowerCamelCase : Dict = torch.sum(torch.abs(__a ) )
__lowerCamelCase : Any = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def snake_case_ ( self ):
__lowerCamelCase : List[str] = self.scheduler_classes[0]
__lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type='v_prediction' )
__lowerCamelCase : Tuple = scheduler_class(**__a )
__lowerCamelCase : Tuple = len(__a )
__lowerCamelCase : str = self.dummy_model()
__lowerCamelCase : Dict = self.dummy_sample_deter
__lowerCamelCase : str = torch.manual_seed(0 )
for t in reversed(range(__a ) ):
# 1. predict noise residual
__lowerCamelCase : str = model(__a , __a )
# 2. predict previous mean of sample x_t-1
__lowerCamelCase : int = scheduler.step(__a , __a , __a , generator=__a ).prev_sample
__lowerCamelCase : Union[str, Any] = pred_prev_sample
__lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
__lowerCamelCase : Optional[int] = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def snake_case_ ( self ):
__lowerCamelCase : Dict = self.scheduler_classes[0]
__lowerCamelCase : Any = self.get_scheduler_config()
__lowerCamelCase : Tuple = scheduler_class(**__a )
__lowerCamelCase : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__a )
__lowerCamelCase : str = scheduler.timesteps
for i, timestep in enumerate(__a ):
if i == len(__a ) - 1:
__lowerCamelCase : Tuple = -1
else:
__lowerCamelCase : Dict = timesteps[i + 1]
__lowerCamelCase : Optional[Any] = scheduler.previous_timestep(__a )
__lowerCamelCase : Dict = prev_t.item()
self.assertEqual(__a , __a )
def snake_case_ ( self ):
__lowerCamelCase : List[Any] = self.scheduler_classes[0]
__lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
__lowerCamelCase : Dict = scheduler_class(**__a )
__lowerCamelCase : Any = [100, 87, 50, 51, 0]
with self.assertRaises(__a , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=__a )
def snake_case_ ( self ):
__lowerCamelCase : str = self.scheduler_classes[0]
__lowerCamelCase : Dict = self.get_scheduler_config()
__lowerCamelCase : str = scheduler_class(**__a )
__lowerCamelCase : List[str] = [100, 87, 50, 1, 0]
__lowerCamelCase : Dict = len(__a )
with self.assertRaises(__a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a )
def snake_case_ ( self ):
__lowerCamelCase : List[str] = self.scheduler_classes[0]
__lowerCamelCase : List[Any] = self.get_scheduler_config()
__lowerCamelCase : str = scheduler_class(**__a )
__lowerCamelCase : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __a , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__a )
| 594 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class A_ ( _a ):
lowerCAmelCase__ = 4_2
class A_ ( _a , _a ):
@register_to_config
def __init__( self: List[Any] ,__lowerCAmelCase: int = 65_536 ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: str = "fourier" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: float = 0.0 ,__lowerCAmelCase: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,__lowerCAmelCase: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,__lowerCAmelCase: Tuple[str] = "UNetMidBlock1D" ,__lowerCAmelCase: str = None ,__lowerCAmelCase: Tuple[int] = (32, 32, 64) ,__lowerCAmelCase: str = None ,__lowerCAmelCase: int = 8 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: bool = False ,):
'''simple docstring'''
super().__init__()
_lowerCamelCase : List[str] = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase : Optional[Any] = GaussianFourierProjection(
embedding_size=8 ,set_W_to_weight=__lowerCAmelCase ,log=__lowerCAmelCase ,flip_sin_to_cos=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase : Any = Timesteps(
block_out_channels[0] ,flip_sin_to_cos=__lowerCAmelCase ,downscale_freq_shift=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase : str = block_out_channels[0] * 4
_lowerCamelCase : str = TimestepEmbedding(
in_channels=__lowerCAmelCase ,time_embed_dim=__lowerCAmelCase ,act_fn=__lowerCAmelCase ,out_dim=block_out_channels[0] ,)
_lowerCamelCase : int = nn.ModuleList([] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Tuple = nn.ModuleList([] )
_lowerCamelCase : List[str] = None
# down
_lowerCamelCase : List[Any] = in_channels
for i, down_block_type in enumerate(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = output_channel
_lowerCamelCase : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase : Tuple = i == len(__lowerCAmelCase ) - 1
_lowerCamelCase : List[Any] = get_down_block(
__lowerCAmelCase ,num_layers=__lowerCAmelCase ,in_channels=__lowerCAmelCase ,out_channels=__lowerCAmelCase ,temb_channels=block_out_channels[0] ,add_downsample=not is_final_block or downsample_each_block ,)
self.down_blocks.append(__lowerCAmelCase )
# mid
_lowerCamelCase : Optional[Any] = get_mid_block(
__lowerCAmelCase ,in_channels=block_out_channels[-1] ,mid_channels=block_out_channels[-1] ,out_channels=block_out_channels[-1] ,embed_dim=block_out_channels[0] ,num_layers=__lowerCAmelCase ,add_downsample=__lowerCAmelCase ,)
# up
_lowerCamelCase : Optional[int] = list(reversed(__lowerCAmelCase ) )
_lowerCamelCase : Tuple = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase : Tuple = out_channels
else:
_lowerCamelCase : Optional[Any] = block_out_channels[0]
for i, up_block_type in enumerate(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = output_channel
_lowerCamelCase : List[str] = (
reversed_block_out_channels[i + 1] if i < len(__lowerCAmelCase ) - 1 else final_upsample_channels
)
_lowerCamelCase : Union[str, Any] = i == len(__lowerCAmelCase ) - 1
_lowerCamelCase : Tuple = get_up_block(
__lowerCAmelCase ,num_layers=__lowerCAmelCase ,in_channels=__lowerCAmelCase ,out_channels=__lowerCAmelCase ,temb_channels=block_out_channels[0] ,add_upsample=not is_final_block ,)
self.up_blocks.append(__lowerCAmelCase )
_lowerCamelCase : Dict = output_channel
# out
_lowerCamelCase : Dict = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 ,32 )
_lowerCamelCase : List[Any] = get_out_block(
out_block_type=__lowerCAmelCase ,num_groups_out=__lowerCAmelCase ,embed_dim=block_out_channels[0] ,out_channels=__lowerCAmelCase ,act_fn=__lowerCAmelCase ,fc_dim=block_out_channels[-1] // 4 ,)
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Dict = timestep
if not torch.is_tensor(__lowerCAmelCase ):
_lowerCamelCase : int = torch.tensor([timesteps] ,dtype=torch.long ,device=sample.device )
elif torch.is_tensor(__lowerCAmelCase ) and len(timesteps.shape ) == 0:
_lowerCamelCase : Optional[Any] = timesteps[None].to(sample.device )
_lowerCamelCase : Dict = self.time_proj(__lowerCAmelCase )
if self.config.use_timestep_embedding:
_lowerCamelCase : Any = self.time_mlp(__lowerCAmelCase )
else:
_lowerCamelCase : Optional[int] = timestep_embed[..., None]
_lowerCamelCase : int = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase : Any = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
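            # at this point the embedding has shape (batch, embedding_channels,
            # sample_length), matching the (batch, channels, length) layout of the 1D
            # sample so the down blocks can combine the two.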
# 2. down
_lowerCamelCase : Any = ()
for downsample_block in self.down_blocks:
_lowerCamelCase : Dict = downsample_block(hidden_states=__lowerCAmelCase ,temb=__lowerCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase : Union[str, Any] = self.mid_block(__lowerCAmelCase ,__lowerCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase : Any = down_block_res_samples[-1:]
_lowerCamelCase : Tuple = down_block_res_samples[:-1]
_lowerCamelCase : str = upsample_block(__lowerCAmelCase ,res_hidden_states_tuple=__lowerCAmelCase ,temb=__lowerCAmelCase )
# 5. post-process
if self.out_block:
_lowerCamelCase : List[str] = self.out_block(__lowerCAmelCase ,__lowerCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=__lowerCAmelCase ) | 709 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!" )
_lowerCamelCase : Any = number_of_bytes // partitions
_lowerCamelCase : Any = []
for i in range(_lowerCamelCase ):
_lowerCamelCase : Tuple = i * bytes_per_partition + 1
_lowerCamelCase : List[str] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
return allocation_list
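# Worked example for the byte-allocation function above: splitting 100 bytes into
# 4 partitions yields ['1-25', '26-50', '51-75', '76-100']; the final range always
# ends at number_of_bytes, so any remainder goes to the last partition.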
if __name__ == "__main__":
import doctest
doctest.testmod() | 386 | 0 |
def lowerCamelCase__ ( __lowerCamelCase : int ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__lowerCamelCase )
if number < 1:
__UpperCAmelCase : List[str] = f"""Input value of [number={number}] must be > 0"""
raise ValueError(__lowerCamelCase )
__UpperCAmelCase : int = 1
for i in range(1 , __lowerCamelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
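# Illustrative values (computed from the loop above): inputs 1 through 6 return
# 1, 1, 2, 5, 14, 42, i.e. the Catalan numbers C_0..C_5, since each iteration
# applies C *= (4 * i - 2) followed by C //= (i + 1).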
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( _a , _a , unittest.TestCase ):
a : Any = IFPipeline
a : str = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
a : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE_ ( self : Any , A_ : int , A_ : Dict=0 ):
'''simple docstring'''
if str(A_ ).startswith("""mps""" ):
__lowercase = torch.manual_seed(A_ )
else:
__lowercase = torch.Generator(device=A_ ).manual_seed(A_ )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
self._test_save_load_local()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
__lowercase = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=A_ , tokenizer=A_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
__lowercase , __lowercase = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__lowercase = None
__lowercase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__lowercase = IFImgaImgPipeline(**pipe_a.components )
__lowercase = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__lowercase = IFInpaintingPipeline(**pipe_a.components )
__lowercase = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(A_ , A_ , A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Any , A_ : int , A_ : str , A_ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : Tuple , A_ : List[Any] , A_ : List[Any] , A_ : Any ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : List[Any] , A_ : str , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(A_ )
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def lowerCAmelCase_ ( ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
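    # clearing the allocator statistics here makes the later
    # torch.cuda.max_memory_allocated() readings reflect only the memory used since
    # this call, which is what the `mem_bytes` assertions in the tests rely on.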
| 616 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
snake_case_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
snake_case_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
snake_case_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = len([g for position, g in enumerate(snake_case_ ) if g == main_target[position]] )
return (item, float(snake_case_ ))
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : str ) -> List[Any]:
'''simple docstring'''
__snake_case = random.randint(0 , len(snake_case_ ) - 1 )
__snake_case = parent_a[:random_slice] + parent_a[random_slice:]
__snake_case = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
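# Illustrative behaviour of the single-point crossover above (note the obfuscated
# listing reuses one name for both parents): with parents "AAAA" and "BBBB" and a
# random slice index of 2, the intended children are ("AABB", "BBAA").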
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : list[str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = list(snake_case_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__snake_case = random.choice(snake_case_ )
return "".join(snake_case_ )
def lowerCamelCase__ ( snake_case_ : tuple[str, float] , snake_case_ : list[tuple[str, float]] , snake_case_ : list[str] , ) -> Optional[int]:
'''simple docstring'''
__snake_case = []
# Generate more children proportionally to the fitness score.
__snake_case = int(parent_a[1] * 100 ) + 1
__snake_case = 10 if child_n >= 10 else child_n
for _ in range(snake_case_ ):
__snake_case = population_score[random.randint(0 , snake_case_ )][0]
__snake_case = crossover(parent_a[0] , snake_case_ )
# Append new string to the population list.
pop.append(mutate(snake_case_ , snake_case_ ) )
pop.append(mutate(snake_case_ , snake_case_ ) )
return pop
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : list[str] , snake_case_ : bool = True ) -> int:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
__snake_case = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(snake_case_ )
# Verify that the target contains no genes besides the ones inside genes variable.
__snake_case = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__snake_case = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(snake_case_ )
# Generate random starting population.
__snake_case = []
for _ in range(snake_case_ ):
population.append(''''''.join([random.choice(snake_case_ ) for i in range(len(snake_case_ ) )] ) )
# Just some logs to know what the algorithms is doing.
__snake_case = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(snake_case_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__snake_case = [evaluate(snake_case_ , snake_case_ ) for item in population]
# Check if there is a matching evolution.
__snake_case = sorted(snake_case_ , key=lambda snake_case_ : x[1] , reverse=snake_case_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
__snake_case = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(snake_case_ )
# Normalize population score to be between 0 and 1.
__snake_case = [
(item, score / len(snake_case_ )) for item, score in population_score
]
# This is selection
for i in range(snake_case_ ):
population.extend(select(population_score[int(snake_case_ )] , snake_case_ , snake_case_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
if len(snake_case_ ) > N_POPULATION:
break
if __name__ == "__main__":
snake_case_ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
snake_case_ = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
snake_case_ = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 718 |
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Optional[Any] ):
"""simple docstring"""
__snake_case = {}
def a (self : str ):
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(a__ , ''' -> ''' , ''' -> '''.join([str(a__ ) for j in self.vertex[i]] ) )
def a (self : Any , a__ : int , a__ : int ):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(a__ )
else:
# else make a new vertex
__snake_case = [to_vertex]
def a (self : Tuple ):
"""simple docstring"""
__snake_case = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(a__ , a__ )
def a (self : Any , a__ : int , a__ : list ):
"""simple docstring"""
__snake_case = True
print(a__ , end=''' ''' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(a__ , a__ )
if __name__ == "__main__":
snake_case_ = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
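# For reference, a minimal iterative sketch of the same depth-first traversal
# using an explicit stack (illustrative only, not part of the original listing):
#
#     def dfs_iterative(graph: Graph, start: int) -> None:
#         visited, stack = set(), [start]
#         while stack:
#             vertex = stack.pop()
#             if vertex not in visited:
#                 visited.add(vertex)
#                 print(vertex, end=" ")
#                 stack.extend(reversed(graph.vertex.get(vertex, [])))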
| 388 | 0 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__magic_name__ : List[str] = logging.get_logger(__name__)
__magic_name__ : Dict = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__magic_name__ : Optional[Any] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
__magic_name__ : Any = {
"""facebook/blenderbot_small-90M""": 5_1_2,
}
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : List[str] = BlenderbotSmallTokenizer
def __init__( self , _A=None , _A=None , _A="<|endoftext|>" , _A="<|endoftext|>" , _A="<|endoftext|>" , _A=False , _A=True , **_A , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) , bos_token=_A , eos_token=_A , unk_token=_A , **_A , )
UpperCamelCase : List[str] = add_prefix_space
def _a ( self , _A , _A=None ):
'''simple docstring'''
UpperCamelCase : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
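        # i.e. a single sequence is encoded as `bos A eos`, and a pair as
        # `bos A eos eos B eos`, using the configured bos/eos token ids.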
def _a ( self , _A , _A = None ):
'''simple docstring'''
UpperCamelCase : List[Any] = [self.sep_token_id]
UpperCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 102 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = sorted(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , key=lambda SCREAMING_SNAKE_CASE : x[0] / x[1] , reverse=SCREAMING_SNAKE_CASE )
    UpperCamelCase , UpperCamelCase = [i[0] for i in r], [i[1] for i in r]
UpperCamelCase : Optional[Any] = list(accumulate(SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = bisect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
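# Worked example (assuming the intended signature is values, weights, capacity,
# item_count): for values [60, 100, 120], weights [10, 20, 30] and capacity 50,
# the two best value/weight ratios are taken whole (60 + 100) plus 20/30 of the
# last item, giving 160 + 80.0 = 240.0.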
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
_lowerCamelCase : str = ["""image_processor""", """tokenizer"""]
_lowerCamelCase : Union[str, Any] = """ViltImageProcessor"""
_lowerCamelCase : Optional[int] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
a_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _SCREAMING_SNAKE_CASE , )
a_ = kwargs.pop("""feature_extractor""" )
a_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = self.image_processor
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
a_ = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# add pixel_values + pixel_mask
a_ = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
encoding.update(_SCREAMING_SNAKE_CASE )
return encoding
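        # the returned BatchEncoding therefore carries both the text fields
        # (input_ids, attention_mask, ...) and the image fields (typically
        # pixel_values and, for ViLT, pixel_mask) in a single dict.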
def __magic_name__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def __magic_name__ ( self ):
a_ = self.tokenizer.model_input_names
a_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __magic_name__ ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __magic_name__ ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 403 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_A = logging.getLogger()
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
a_ = argparse.ArgumentParser()
parser.add_argument("""-f""" )
a_ = parser.parse_args()
return args.f
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
a_ = {}
a_ = os.path.join(UpperCamelCase , """all_results.json""" )
if os.path.exists(UpperCamelCase ):
with open(UpperCamelCase , """r""" ) as f:
a_ = json.load(UpperCamelCase )
else:
raise ValueError(F"""can't find {path}""" )
return results
def __SCREAMING_SNAKE_CASE ( ) -> Any:
"""simple docstring"""
a_ = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
_A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
@classmethod
def __magic_name__ ( cls ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
a_ = tempfile.mkdtemp()
a_ = os.path.join(cls.tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
a_ = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
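        # every test below prepends these launcher arguments, so each example script
        # runs via `accelerate launch --config_file <tmp config>` followed by the
        # script-specific arguments built in the test body.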
@classmethod
def __magic_name__ ( cls ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
a_ = 7 if get_gpu_count() > 1 else 2
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """translation_no_trainer""" ) ) )
@slow
def __magic_name__ ( self ):
a_ = logging.StreamHandler(sys.stdout )
logger.addHandler(_SCREAMING_SNAKE_CASE )
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.1_0 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
        # The base model scores 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """image_classification_no_trainer""" ) ) ) | 403 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
A__ : Dict = logging.get_logger(__name__)
@add_end_docstrings(snake_case__ )
class lowercase__ ( snake_case__ ):
def __init__( self : List[Any] , *snake_case__ : Dict , **snake_case__ : List[str] ):
super().__init__(*snake_case__ , **snake_case__ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Tuple=None , snake_case__ : Tuple=None , snake_case__ : Any=None ):
lowerCamelCase_ : List[str] ={}
lowerCamelCase_ : List[Any] ={}
if prompt is not None:
lowerCamelCase_ : Union[str, Any] =prompt
if generate_kwargs is not None:
lowerCamelCase_ : List[str] =generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowerCamelCase_ : Optional[int] ={}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
lowerCamelCase_ : Dict =max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Any , snake_case__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **snake_case__ : Optional[Any] ):
return super().__call__(snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : str , snake_case__ : Dict=None ):
lowerCamelCase_ : Any =load_image(snake_case__ )
if prompt is not None:
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F"""Received an invalid text input, got - {type(snake_case__ )} - but expected a single string. """
"Note also that one single text can be provided for conditional image to text generation." )
lowerCamelCase_ : Optional[int] =self.model.config.model_type
if model_type == "git":
lowerCamelCase_ : Optional[int] =self.image_processor(images=snake_case__ , return_tensors=self.framework )
lowerCamelCase_ : Union[str, Any] =self.tokenizer(text=snake_case__ , add_special_tokens=snake_case__ ).input_ids
lowerCamelCase_ : str =[self.tokenizer.cls_token_id] + input_ids
lowerCamelCase_ : Optional[Any] =torch.tensor(snake_case__ ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
lowerCamelCase_ : Union[str, Any] =self.image_processor(images=snake_case__ , header_text=snake_case__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCamelCase_ : Union[str, Any] =self.image_processor(images=snake_case__ , return_tensors=self.framework )
lowerCamelCase_ : Dict =self.tokenizer(snake_case__ , return_tensors=self.framework )
model_inputs.update(snake_case__ )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowerCamelCase_ : Optional[int] =self.image_processor(images=snake_case__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowerCamelCase_ : Union[str, Any] =None
return model_inputs
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Any , snake_case__ : Dict=None ):
        # The Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , snake_case__ )
and all(x is None for x in model_inputs["input_ids"] )
):
lowerCamelCase_ : Tuple =None
if generate_kwargs is None:
lowerCamelCase_ : List[Any] ={}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCamelCase_ : str =model_inputs.pop(self.model.main_input_name )
lowerCamelCase_ : List[Any] =self.model.generate(snake_case__ , **snake_case__ , **snake_case__ )
return model_outputs
def UpperCAmelCase__ ( self : str , snake_case__ : Any ):
lowerCamelCase_ : Optional[Any] =[]
for output_ids in model_outputs:
lowerCamelCase_ : Tuple ={
"generated_text": self.tokenizer.decode(
snake_case__ , skip_special_tokens=snake_case__ , )
}
records.append(snake_case__ )
return records
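# Hypothetical usage of the pipeline defined above (the checkpoint name is an
# assumption and not taken from this listing):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     print(captioner("photo.jpg"))  # -> [{"generated_text": "..."}]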
| 153 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] ) -> Tuple:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
lowerCamelCase_ : Optional[Any] =TapasConfig.from_json_file(lowerCamelCase__ )
# set absolute/relative position embeddings parameter
lowerCamelCase_ : Optional[int] =reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowerCamelCase_ : Any =TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
lowerCamelCase_ : Optional[Any] =4
lowerCamelCase_ : Optional[int] =True
# hparam_utils.py hparams
lowerCamelCase_ : Dict =0.66_4694
lowerCamelCase_ : List[Any] =0.20_7951
lowerCamelCase_ : int =0.12_1194
lowerCamelCase_ : Union[str, Any] =True
lowerCamelCase_ : List[Any] =True
lowerCamelCase_ : str =False
lowerCamelCase_ : int =0.035_2513
lowerCamelCase_ : str =TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowerCamelCase_ : List[Any] =4
lowerCamelCase_ : int =False
# hparam_utils.py hparams
lowerCamelCase_ : Tuple =36.4519
lowerCamelCase_ : List[str] =0.90_3421
lowerCamelCase_ : Optional[int] =222.088
lowerCamelCase_ : int =True
lowerCamelCase_ : Any =True
lowerCamelCase_ : List[str] =True
lowerCamelCase_ : Any =0.76_3141
lowerCamelCase_ : Dict =TapasForQuestionAnswering(config=lowerCamelCase__ )
elif task == "TABFACT":
lowerCamelCase_ : Dict =TapasForSequenceClassification(config=lowerCamelCase__ )
elif task == "MLM":
lowerCamelCase_ : Optional[Any] =TapasForMaskedLM(config=lowerCamelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
lowerCamelCase_ : str =TapasModel(config=lowerCamelCase__ )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCamelCase__ )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
lowerCamelCase_ : List[Any] =TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(lowerCamelCase__ )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
A__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A__ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
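# Example invocation of this conversion script (the script name and all paths are
# placeholders; the flags come from the argument parser above):
#
#   python <this_script>.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output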
| 153 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCamelCase : Any = {
"camembert-base": 512,
}
__UpperCamelCase : Any = "▁"
class _UpperCamelCase ( UpperCamelCase_ ):
'''simple docstring'''
a_ : Dict = VOCAB_FILES_NAMES
a_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
a_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : str = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]="<s>" , _lowerCamelCase : Any="</s>" , _lowerCamelCase : Optional[int]="</s>" , _lowerCamelCase : Any="<s>" , _lowerCamelCase : Any="<unk>" , _lowerCamelCase : int="<pad>" , _lowerCamelCase : List[str]="<mask>" , _lowerCamelCase : Dict=["<s>NOTUSED", "</s>NOTUSED"] , _lowerCamelCase : List[str] = None , **_lowerCamelCase : str , ):
'''simple docstring'''
__lowerCamelCase : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
__lowerCamelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
__lowerCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
__lowerCamelCase : int = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
__lowerCamelCase : Any = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
__lowerCamelCase : List[str] = len(self.fairseq_tokens_to_ids )
__lowerCamelCase : List[str] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__lowerCamelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
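        # net effect: sentencepiece piece id k is exposed as id k + 4
        # (self.fairseq_offset), while ids 0-3 stay reserved for the special tokens
        # declared in fairseq_tokens_to_ids above.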
def _snake_case ( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : Optional[int] = [self.cls_token_id]
__lowerCamelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Dict = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _snake_case ( self : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] = None ):
'''simple docstring'''
__lowerCamelCase : Any = [self.sep_token_id]
__lowerCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self : Any ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self : List[Any] , _lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.sp_model.encode(_a , out_type=_a )
def _snake_case ( self : int , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_a ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_a )
def _snake_case ( self : int , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self : int , _lowerCamelCase : Any ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : Union[str, Any] = """"""
__lowerCamelCase : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : str = []
else:
current_sub_tokens.append(_a )
__lowerCamelCase : List[Any] = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __getstate__( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = self.__dict__.copy()
__lowerCamelCase : List[Any] = None
return state
def __setstate__( self : int , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCamelCase : List[str] = {}
__lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase : List[str] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
__lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
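# Minimal usage sketch (illustrative only; it goes through the upstream `CamembertTokenizer`
# entry point rather than instantiating the class above directly, and assumes the public
# `camembert-base` checkpoint is available):
#
#   from transformers import CamembertTokenizer
#
#   tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#   encoding = tokenizer("J'aime le camembert !")
#   print(encoding["input_ids"])
#   print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))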
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Union[str, Any] = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['LayoutLMv2FeatureExtractor']
__UpperCamelCase : Union[str, Any] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
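# Usage sketch (illustrative only): thanks to the `_LazyModule` indirection above, the heavy
# submodules are only imported when an attribute is first accessed, e.g. (upstream names):
#
#   from transformers import LayoutLMv2Config, LayoutLMv2Processor
#
#   config = LayoutLMv2Config()
#
# The tokenizers/vision/torch branches guarded by `OptionalDependencyNotAvailable` are simply
# left out of the import structure when the corresponding optional dependency is missing.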
| 458 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowerCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
lowercase_ : Optional[Any] = ShapEImgaImgPipeline
lowercase_ : Optional[Any] = ['''image''']
lowercase_ : List[Any] = ['''image''']
lowercase_ : List[str] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowercase_ : Any = False
@property
def _a ( self ) -> str:
return 32
@property
def _a ( self ) -> int:
return 32
@property
def _a ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def _a ( self ) -> Union[str, Any]:
return 8
@property
def _a ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
_UpperCAmelCase = CLIPVisionModel(__snake_case )
return model
@property
def _a ( self ) -> int:
_UpperCAmelCase = CLIPImageProcessor(
crop_size=224 , do_center_crop=__snake_case , do_normalize=__snake_case , do_resize=__snake_case , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def _a ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_UpperCAmelCase = PriorTransformer(**__snake_case )
return model
@property
def _a ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCAmelCase = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
_UpperCAmelCase = ShapERenderer(**__snake_case )
return model
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.dummy_prior
_UpperCAmelCase = self.dummy_image_encoder
_UpperCAmelCase = self.dummy_image_processor
_UpperCAmelCase = self.dummy_renderer
_UpperCAmelCase = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=__snake_case , clip_sample=__snake_case , clip_sample_range=1.0 , )
_UpperCAmelCase = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _a ( self , a_ , a_=0 ) -> List[str]:
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(__snake_case )
else:
_UpperCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
_UpperCAmelCase = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _a ( self ) -> Dict:
_UpperCAmelCase = "cpu"
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**__snake_case )
_UpperCAmelCase = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_UpperCAmelCase = pipe(**self.get_dummy_inputs(__snake_case ) )
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCAmelCase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ) -> Optional[Any]:
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> List[str]:
_UpperCAmelCase = torch_device == "cpu"
_UpperCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__snake_case , relax_max_difference=__snake_case , )
def _a ( self ) -> Dict:
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**__snake_case )
_UpperCAmelCase = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_UpperCAmelCase = 1
_UpperCAmelCase = 2
_UpperCAmelCase = self.get_dummy_inputs(__snake_case )
for key in inputs.keys():
if key in self.batch_params:
_UpperCAmelCase = batch_size * [inputs[key]]
_UpperCAmelCase = pipe(**__snake_case , num_images_per_prompt=__snake_case )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Any:
_UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy" )
_UpperCAmelCase = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
_UpperCAmelCase = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_UpperCAmelCase = torch.Generator(device=__snake_case ).manual_seed(0 )
_UpperCAmelCase = pipe(
__snake_case , generator=__snake_case , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
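# The tests above follow the standard diffusers test layout and would typically be run with
# pytest (the file path below is a placeholder):
#
#   pytest tests/pipelines/shap_e/test_shap_e_img2img.py -v
#
# The slow, GPU-marked integration test at the bottom is skipped unless the RUN_SLOW
# environment variable is set and a CUDA device is available.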
| 657 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def UpperCamelCase_( _snake_case : Optional[Any] ):
"""simple docstring"""
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def UpperCamelCase_( _snake_case : List[str] , _snake_case : Optional[int] ):
"""simple docstring"""
__a ={}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
__a =key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
__a =key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
__a =key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
__a =key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
__a =key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
__a =key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
__a =key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
__a =key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
__a =key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
__a =key.replace('image_encoder.module' , 'flava.image_model' )
__a =key.replace('text_encoder.module' , 'flava.text_model' )
__a =key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
__a =key.replace('mm_encoder.module' , 'flava.multimodal_model' )
__a =key.replace('text_projection' , 'flava.text_projection' )
__a =key.replace('image_projection' , 'flava.image_projection' )
__a =value.float()
for key, value in codebook_state_dict.items():
__a =value
return upgrade
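# Illustrative effect of the renaming above (the keys are hypothetical examples that follow the
# replaced patterns; values are copied over as float tensors):
#
#   'heads.cmd.mlm_head.cls.predictions.decoder.weight' -> 'mmm_text_head.decoder.weight'
#   'image_encoder.module.cls_token'                    -> 'flava.image_model.cls_token'
#   'mm_text_projection.weight'                         -> 'flava.text_to_mm_projection.weight'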
@torch.no_grad()
def UpperCamelCase_( _snake_case : int , _snake_case : Tuple , _snake_case : Tuple , _snake_case : int=None ):
"""simple docstring"""
if config_path is not None:
__a =FlavaConfig.from_pretrained(_snake_case )
else:
__a =FlavaConfig()
__a =FlavaForPreTraining(_snake_case ).eval()
__a =convert_dalle_checkpoint(_snake_case , _snake_case , save_checkpoint=_snake_case )
if os.path.exists(_snake_case ):
__a =torch.load(_snake_case , map_location='cpu' )
else:
__a =torch.hub.load_state_dict_from_url(_snake_case , map_location='cpu' )
__a =upgrade_state_dict(_snake_case , _snake_case )
hf_model.load_state_dict(_snake_case )
__a =hf_model.state_dict()
__a =count_parameters(_snake_case )
__a =count_parameters(_snake_case ) + count_parameters(_snake_case )
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
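# Example invocation (illustrative only; the script filename and paths are placeholders, while
# the flag names match the argparse definitions above):
#
#   python convert_flava_checkpoint.py \
#       --checkpoint_path /path/to/flava_checkpoint.pt \
#       --codebook_path /path/to/flava_codebook.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --config_path /path/to/config.json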
| 242 | 0 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : List[str] = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class UpperCamelCase__ ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = """autoformer"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = [1, 2, 3, 4, 5, 6, 7] , snake_case = True , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 6_4 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 3_2 , snake_case = 3_2 , snake_case = "gelu" , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 1_0_0 , snake_case = 0.02 , snake_case = True , snake_case=True , snake_case = 1_0 , snake_case = 2_5 , snake_case = 3 , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : List[str] = prediction_length
UpperCAmelCase : str = context_length if context_length is not None else prediction_length
UpperCAmelCase : Any = distribution_output
UpperCAmelCase : List[str] = loss
UpperCAmelCase : str = input_size
UpperCAmelCase : Any = num_time_features
UpperCAmelCase : List[Any] = lags_sequence
UpperCAmelCase : List[Any] = scaling
UpperCAmelCase : str = num_dynamic_real_features
UpperCAmelCase : str = num_static_real_features
UpperCAmelCase : Any = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(snake_case_ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
UpperCAmelCase : Union[str, Any] = cardinality
else:
UpperCAmelCase : Optional[int] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(snake_case_ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
UpperCAmelCase : Optional[Any] = embedding_dimension
else:
UpperCAmelCase : str = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase : Dict = d_model
UpperCAmelCase : Tuple = encoder_attention_heads
UpperCAmelCase : List[str] = decoder_attention_heads
UpperCAmelCase : Tuple = encoder_ffn_dim
UpperCAmelCase : List[str] = decoder_ffn_dim
UpperCAmelCase : Optional[Any] = encoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : Optional[Any] = dropout
UpperCAmelCase : Optional[Any] = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : Tuple = encoder_layerdrop
UpperCAmelCase : Any = decoder_layerdrop
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : Optional[int] = init_std
UpperCAmelCase : Any = use_cache
# Autoformer
UpperCAmelCase : Union[str, Any] = label_length
UpperCAmelCase : List[Any] = moving_average
UpperCAmelCase : Any = autocorrelation_factor
super().__init__(is_encoder_decoder=snake_case_ , **snake_case_ )
@property
def A_ ( self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
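# Usage sketch (illustrative only; it relies on the upstream `AutoformerConfig` interface that
# this class mirrors, and the hyperparameter values are arbitrary):
#
#   from transformers import AutoformerConfig
#
#   config = AutoformerConfig(prediction_length=24, context_length=48, lags_sequence=[1, 2, 3])
#   print(config.prediction_length, config.d_model)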
| 720 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a : Optional[Any] = "<<<<<<< This should probably be modified because it mentions: "
a : List[Any] = "=======\n>>>>>>>\n"
a : Union[str, Any] = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
a : str = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
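# Illustrative effect of the (pattern, replacement) pairs above on a line of TFDS code
# (hypothetical input lines):
#
#   feature = tfds.features.Text()          ->  feature = datasets.Value('string')
#   with tf.io.gfile.GFile(path) as f:      ->  with open(path) as f: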
def lowercase ( __magic_name__ ):
'''Factory used by the `datasets-cli` parser to build a ConvertCommand from the parsed arguments.'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
@staticmethod
def A_ ( snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=snake_case , required=snake_case , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=snake_case , required=snake_case , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=snake_case )
def __init__( self , snake_case , snake_case , *snake_case ):
'''simple docstring'''
UpperCAmelCase : Any = get_logger("datasets-cli/converting" )
UpperCAmelCase : Dict = tfds_path
UpperCAmelCase : Optional[int] = datasets_directory
def A_ ( self ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
UpperCAmelCase : Optional[int] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
UpperCAmelCase : List[Any] = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
UpperCAmelCase : Optional[int] = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
UpperCAmelCase : int = []
UpperCAmelCase : int = []
UpperCAmelCase : Union[str, Any] = {}
if os.path.isdir(self._tfds_path ):
UpperCAmelCase : List[str] = os.listdir(snake_case )
else:
UpperCAmelCase : Union[str, Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
UpperCAmelCase : Optional[Any] = os.path.join(snake_case , snake_case )
UpperCAmelCase : Any = os.path.join(snake_case , snake_case )
if not os.path.isfile(snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(snake_case , encoding="utf-8" ) as f:
UpperCAmelCase : str = f.readlines()
UpperCAmelCase : List[str] = []
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = []
for line in lines:
UpperCAmelCase : Any = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
UpperCAmelCase : List[Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
UpperCAmelCase : Any = ""
continue
elif "from absl import logging" in out_line:
UpperCAmelCase : List[str] = "from datasets import logging\n"
elif "getLogger" in out_line:
UpperCAmelCase : Union[str, Any] = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
UpperCAmelCase : Dict = True
UpperCAmelCase : Optional[Any] = list(filter(lambda snake_case : e in out_line , snake_case ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(snake_case ) + "\n" )
out_lines.append(snake_case )
out_lines.append(snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
UpperCAmelCase : Any = re.sub(snake_case , snake_case , snake_case )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
UpperCAmelCase : int = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
UpperCAmelCase : Dict = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
UpperCAmelCase : Dict = True
out_lines.append(snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
UpperCAmelCase : List[str] = f_name.replace(".py" , "" )
UpperCAmelCase : str = os.path.join(snake_case , snake_case )
UpperCAmelCase : str = os.path.join(snake_case , snake_case )
os.makedirs(snake_case , exist_ok=snake_case )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(snake_case )
if needs_manual_update:
with_manual_update.append(snake_case )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.writelines(snake_case )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
UpperCAmelCase : Optional[int] = os.path.basename(snake_case )
UpperCAmelCase : int = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(snake_case , snake_case )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 609 | 0 |