code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
# Module-level logger; the rest of this module calls it as `logger` (e.g. logger.info
# when loading/saving cached features), so it must be bound under that name.
logger = logging.get_logger(__name__)
@dataclass
class UpperCamelCase_:
    """Command-line arguments describing a GLUE fine-tuning data setup.

    Field names match the attributes the dataset class below reads
    (`args.task_name`, `args.data_dir`, `args.max_seq_length`,
    `args.overwrite_cache`).
    """

    # GLUE task key; normalized to lowercase in __post_init__ so lookups into
    # glue_processors / glue_output_modes succeed.
    task_name: str = field(
        metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())}
    )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self) -> None:
        # Dataclass hook: runs automatically after __init__, so the task name is
        # always lowercase before it is used as a dictionary key.
        self.task_name = self.task_name.lower()
class UpperCamelCase_(Enum):
    """Dataset split selector.

    An Enum (Enum is imported at the top of this module): the dataset class
    below relies on member access (`.train`, `.dev`, `.test`), name lookup
    (`Split[mode]`) and `.value` for cache-file naming — none of which work
    with plain class attributes.
    """

    train = "train"
    dev = "dev"
    test = "test"
class UpperCamelCase_(Dataset):
    """Deprecated GLUE dataset: tokenizes examples for a GLUE task and caches
    the resulting features on disk.

    NOTE(review): this module's enum/arguments classes appear under garbled
    names in this dump; the references to `Split` and
    `GlueDataTrainingArguments` below assume the canonical names are bound at
    import time — confirm against the surrounding module.
    """

    args: "GlueDataTrainingArguments"
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: "GlueDataTrainingArguments",
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: "Union[str, Split]" = "train",  # "train" is equivalent to Split.train after normalization below
        cache_dir: Optional[str] = None,
    ) -> None:
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,  # warn() takes the warning category here, not an arbitrary object
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def _SCREAMING_SNAKE_CASE(self):
        """Return the (possibly MNLI-swapped) label list for this task."""
        return self.label_list
| 95 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the top-k fillings for the single ``<mask>`` token in *masked_input*.

    Named ``fill_mask`` because that is the name the script below calls.
    Each result is a tuple ``(filled_sentence, probability, predicted_token)``.
    Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    """
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # SentencePiece marks a word boundary with U+2581; map it back to a space.
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
# Demo: fill the <mask> token with CamemBERT's top-3 predictions.
# The bindings must be named `tokenizer`, `model` and `masked_input` because
# the `fill_mask(...)` call below reads them.
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()  # inference only — disable dropout

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# The tokenizer class below reads these module-level constants by the names
# used here (logger / VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES), so each must get its own binding.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Maximum sequence length the pretrained checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a byte -> printable unicode character mapping for byte-level BPE.

    Printable bytes map to themselves; the remaining bytes are mapped to
    characters starting at U+0100 so that no byte maps to a whitespace or
    control character the BPE code would mishandle. Named ``bytes_to_unicode``
    because the tokenizer below calls it under that name; cached since the
    table is constant.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols).

    Named ``get_pairs`` because the BPE loop in the tokenizer below calls it
    under that name.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _UpperCamelCase(PreTrainedTokenizer):
    """LED tokenizer: byte-level BPE (same vocabulary/merges format as BART).

    Base class is ``PreTrainedTokenizer`` (imported at the top of this
    module); the method names below follow its contract (``_tokenize``,
    ``_convert_token_to_id``, ``save_vocabulary``, ...), which is required for
    the tokenizer machinery to dispatch into them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # first line is the "#version" header, last entry is the trailing newline
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single pre-tokenized token; memoized in self.cache."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # lowest-ranked (earliest learned) pair merges first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab; unknown tokens map to unk."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a string via the byte decoder."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # the lambda must take the (pair, rank) item — writing by ascending rank
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``<s> A </s>`` or ``<s> A </s></s> B </s>`` from one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """LED (like BART/RoBERTa) does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        # byte-level BPE treats a leading space as part of the first word
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        """Pad as usual, then extend ``global_attention_mask`` to the padded length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
| 147 |
"""simple docstring"""
class FlowNetwork:
    """A directed flow network over an adjacency-matrix capacity graph.

    Named ``FlowNetwork`` because the script at the bottom of this module
    instantiates it under that name. Exposes camelCase attributes
    (``verticesCount``/``sourceIndex``/``sinkIndex``) because the executor
    classes below read exactly those names.
    """

    def __init__(self, graph, sources, sinks):
        self.sourceIndex = None
        self.sinkIndex = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticesCount = len(self.graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        """Coerce scalar sources/sinks to lists and add fake super-source/sink
        vertices when more than one source or sink is given."""
        # isinstance, not `sources is int`: the latter compares against the
        # type object itself and is always False for an int value.
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.sourceIndex = sources[0]
        self.sinkIndex = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            # prepend a super-source row/column
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow  # +1: indices shifted by the new row
            self.sourceIndex = 0

            # append a super-sink row/column
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sinkIndex = size - 1

    def find_maximum_flow(self):
        """Run the configured algorithm and return the maximum flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.sourceIndex is None or self.sinkIndex is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        """Bind an executor class (e.g. PushRelabelExecutor) to this network."""
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    """Base executor: snapshots the network and runs _algorithm() once."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticesCount
        self.source_index = flow_network.sourceIndex
        self.sink_index = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        """Hook for subclasses; the base class does nothing."""
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Executor that produces a maximum-flow value."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        # camelCase name kept: FlowNetwork.find_maximum_flow() calls it.
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Push-relabel maximum flow with the relabel-to-front selection rule.

    Named ``PushRelabelExecutor`` because the script below passes this class
    to ``set_maximum_flow_algorithm``.
    """

    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        # the source starts at height n, everything else at 0
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        # net outflow of the source equals the maximum flow
        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        """Discharge a vertex: push along admissible edges, then relabel."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        """Push as much excess as the residual capacity allows."""
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        """Lift the vertex just above its lowest neighbour with residual capacity."""
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    # Demo: the bindings must be named `entrances`, `exits`, `graph`,
    # `flow_network` and `maximum_flow` because the lines below read them.
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
| 147 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding: each optional-backend branch must ADD a key to
# `_import_structure` (not rebind a throwaway name), and the module object
# must be replaced with a _LazyModule at the end — both are what _LazyModule
# relies on.
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only; at runtime the lazy module handles them.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # Replace this module in sys.modules so attribute access triggers lazy imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding: each optional-backend branch must ADD a key to
# `_import_structure` (not rebind a throwaway name) so the final
# `_import_structure` reference is defined and complete.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only; at runtime the lazy module handles them.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules so attribute access triggers lazy imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 232 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_(ProcessorMixin):
    """CLAP processor wrapping a ClapFeatureExtractor and a Roberta tokenizer.

    Base class is ``ProcessorMixin`` (imported at the top of this module);
    the class attributes and method names below (``feature_extractor_class``,
    ``tokenizer_class``, ``batch_decode``, ``decode``, ``model_input_names``)
    follow its contract.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize *text* and/or extract features from *audios*.

        Returns the text encoding (with ``input_features`` merged in when
        audio is also given), or a BatchEncoding of audio features only.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # merge the audio features into the text encoding before returning
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # union of tokenizer and feature-extractor input names, order-preserving
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 715 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer ( model ):
    """Return one representative linear layer of *model* for inspection in tests.

    GPT-2 style models expose the first MLP projection as ``c_fc``; bloom-style
    models name the second MLP projection ``dense_4h_to_h``.
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module ):
    """Wraps a linear module and adds a trainable low-rank (LoRA) adapter to its output.

    The wrapped module is left untouched; the adapter is a rank-bottlenecked pair of
    bias-free linear layers whose second projection is zero-initialized, so the layer
    initially behaves exactly like the wrapped module.
    """

    def __init__(self , module : nn.Module , rank : int ):
        super().__init__()
        self.module = module
        # Low-rank bottleneck: in_features -> rank -> out_features, no biases.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) ,
            nn.Linear(rank , module.out_features , bias=False ) ,
        )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        # Zero-init the up-projection: the adapter starts as a no-op residual.
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )

    def forward(self , inputs , *args , **kwargs ):
        # Base module output plus the low-rank correction.
        return self.module(inputs , *args , **kwargs ) + self.adapter(inputs )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A_ ( unittest.TestCase ):
    # Base class for the quantization (bitsandbytes) GPU test suites below.
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    _lowerCamelCase : Any = """bigscience/bloom-1b7"""

    # Constant values
    # NOTE(review): every class attribute below is bound to the same name
    # `_lowerCamelCase`, so earlier values are shadowed; methods read
    # `self.model_name` / `self.EXPECTED_OUTPUTS` which are never defined here —
    # these names look garbled, verify against the original test file.
    _lowerCamelCase : Optional[int] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
    _lowerCamelCase : str = """Hello my name is"""
    _lowerCamelCase : List[Any] = set()
    # NOTE(review): `EXPECTED_OUTPUTS` is not defined in this file — presumably the
    # set() above was meant to be bound to that name.
    EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
    EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
    EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
    _lowerCamelCase : List[Any] = 10

    def lowercase ( self : Dict ):
        # Models and tokenizer
        # NOTE(review): result is bound to a throwaway local, not `self.tokenizer` —
        # looks garbled, verify.
        _UpperCAmelCase = AutoTokenizer.from_pretrained(self.model_name )
class A_ ( lowerCAmelCase_ ):
    # 4-bit quantization test suite (inherits constants/tokenizer from the base class).
    # NOTE(review): all test methods share the name `lowercase`, so each definition
    # shadows the previous one and only the last survives — presumably these were
    # distinct `test_*` methods originally; verify.

    def lowercase ( self : str ):
        super().setUp()
        # Models and tokenizer
        # NOTE(review): results bound to throwaway locals instead of
        # `self.model_fpaa` / `self.model_abit` (which tearDown deletes) — verify.
        _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="auto" )
        _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )

    def lowercase ( self : Any ):
        # Free GPU memory between tests.
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase ( self : Tuple ):
        # The quantized model must expose a serializable quantization config.
        _UpperCAmelCase = self.model_abit.config
        self.assertTrue(hasattr(snake_case_ , "quantization_config" ) )
        _UpperCAmelCase = config.to_dict()
        _UpperCAmelCase = config.to_diff_dict()
        _UpperCAmelCase = config.to_json_string()

    def lowercase ( self : Optional[Any] ):
        # Memory footprint must shrink by the expected ratio and linear weights
        # must be bitsandbytes 4-bit parameters.
        from bitsandbytes.nn import Paramsabit

        _UpperCAmelCase = self.model_fpaa.get_memory_footprint()
        _UpperCAmelCase = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        _UpperCAmelCase = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )

    def lowercase ( self : Tuple ):
        # Every quantizable linear layer should store packed uint8 weights.
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(snake_case_ , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )

    def lowercase ( self : Optional[int] ):
        # Generation with the quantized model should produce one of the known outputs.
        _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        _UpperCAmelCase = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS )

    def lowercase ( self : Tuple ):
        # Loading via an explicit BitsAndBytesConfig must behave like the shortcut flag.
        _UpperCAmelCase = BitsAndBytesConfig()
        _UpperCAmelCase = True
        _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=snake_case_ , device_map="auto" )
        _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        _UpperCAmelCase = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS )

    def lowercase ( self : List[str] ):
        # Serializing a 4-bit model is expected to raise.
        with self.assertRaises(snake_case_ ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(snake_case_ )

    def lowercase ( self : List[str] ):
        # Passing both a quantization config and the shortcut flag must raise.
        _UpperCAmelCase = BitsAndBytesConfig()
        with self.assertRaises(snake_case_ ):
            _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=snake_case_ , load_in_abit=snake_case_ , device_map="auto" , bnb_abit_quant_type="nf4" , )

    def lowercase ( self : List[Any] ):
        # Casting / moving a quantized model must raise; the fp16 model keeps working.
        with self.assertRaises(snake_case_ ):
            # Tries with `str`
            self.model_abit.to("cpu" )
        with self.assertRaises(snake_case_ ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(snake_case_ ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )
        with self.assertRaises(snake_case_ ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(snake_case_ ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        _UpperCAmelCase = self.model_fpaa.to(torch.floataa )
        _UpperCAmelCase = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
        # Check this does not throw an error
        _UpperCAmelCase = self.model_fpaa.to("cpu" )
        # Check this does not throw an error
        _UpperCAmelCase = self.model_fpaa.half()
        # Check this does not throw an error
        _UpperCAmelCase = self.model_fpaa.float()

    def lowercase ( self : str ):
        # T5 keeps some modules (e.g. lm-head-adjacent) in fp32 even when quantized.
        _UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=snake_case_ , device_map="auto" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A_ ( unittest.TestCase ):
    # T5-specific quantization tests (dense-relu-dense and dense-act variants).
    # NOTE(review): duplicated method name `lowercase` shadows earlier definitions;
    # results bound to throwaway locals — verify against the original test file.

    @classmethod
    def lowercase ( cls : List[Any] ):
        # Class-level fixtures: model names, tokenizer, and a shared prompt.
        _UpperCAmelCase = "t5-small"
        _UpperCAmelCase = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
        _UpperCAmelCase = AutoTokenizer.from_pretrained(cls.model_name )
        _UpperCAmelCase = "Translate in German: Hello, my dog is cute"

    def lowercase ( self : Dict ):
        # Free GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase ( self : Any ):
        # Temporarily clear the keep-in-fp32 module list, then generate with both variants.
        from transformers import TaForConditionalGeneration

        _UpperCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
        _UpperCAmelCase = None

        # test with `t5-small`
        _UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
        _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _UpperCAmelCase = model.generate(**snake_case_ )

        # test with `flan-t5-small`
        _UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=snake_case_ , device_map="auto" )
        _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _UpperCAmelCase = model.generate(**snake_case_ )
        _UpperCAmelCase = modules

    def lowercase ( self : Any ):
        # Decoder attention projections must be quantized bitsandbytes linears.
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        _UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )

        _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _UpperCAmelCase = model.generate(**snake_case_ )

        # test with `flan-t5-small`
        _UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=snake_case_ , device_map="auto" )
        _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _UpperCAmelCase = model.generate(**snake_case_ )
class A_ ( lowerCAmelCase_ ):
    # Verifies quantization across model heads (base, sequence classification,
    # causal LM, seq2seq). NOTE(review): setUp results are bound to throwaway
    # locals instead of the `self.*` attributes read below — verify.

    def lowercase ( self : List[str] ):
        super().setUp()
        # model_name
        _UpperCAmelCase = "bigscience/bloom-560m"
        _UpperCAmelCase = "t5-small"

        # Different types of model
        _UpperCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
        # Sequence classification model
        _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=snake_case_ , device_map="auto" )
        # CausalLM model
        _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
        # Seq2seq model
        _UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=snake_case_ , device_map="auto" )

    def lowercase ( self : Dict ):
        # Free GPU memory between tests.
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase ( self : Tuple ):
        # MLP weights are quantized; task heads stay regular fp parameters.
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A_ ( lowerCAmelCase_ ):
    # Quantized text-generation pipeline smoke test.
    # NOTE(review): the pipeline object is bound to a throwaway local, yet
    # tearDown deletes `self.pipe` and the test reads `self.pipe` — looks
    # garbled, verify.

    def lowercase ( self : Union[str, Any] ):
        super().setUp()

    def lowercase ( self : Optional[int] ):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase ( self : Optional[int] ):
        # Build a 4-bit fp16 pipeline and check generation lands in the known outputs.
        _UpperCAmelCase = pipeline(
            "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )

        # Real second forward pass
        _UpperCAmelCase = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A_ ( lowerCAmelCase_ ):
    # Multi-GPU: quantized model must spread across both devices and still generate.
    # NOTE(review): locals `model_parallel` / `encoded_input` are read but the
    # loads above bind throwaway names — looks garbled, verify.

    def lowercase ( self : Tuple ):
        super().setUp()

    def lowercase ( self : List[Any] ):
        _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=snake_case_ , device_map="balanced" )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )

        # Check that inference pass works on the model
        _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )

        # Second real batch
        _UpperCAmelCase = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS )
class A_ ( lowerCAmelCase_ ):
    # End-to-end adapter-training test: freeze a quantized OPT model, attach LoRA
    # adapters to the attention projections, and check gradients flow only there.

    def lowercase ( self : Dict ):
        # NOTE(review): the model name is bound to a throwaway local before setUp —
        # presumably it was meant to override `self.model_name`; verify.
        _UpperCAmelCase = "facebook/opt-350m"
        super().setUp()

    def lowercase ( self : Dict ):
        # Adapter training requires bitsandbytes >= 0.37.0; skip silently otherwise.
        if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
            return

        # Step 1: freeze all parameters
        _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )

        for param in model.parameters():
            _UpperCAmelCase = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                _UpperCAmelCase = param.data.to(torch.floataa )

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(snake_case_ ) ):
                _UpperCAmelCase = LoRALayer(module.q_proj , rank=1_6 )
                _UpperCAmelCase = LoRALayer(module.k_proj , rank=1_6 )
                _UpperCAmelCase = LoRALayer(module.v_proj , rank=1_6 )

        # Step 3: dummy batch
        _UpperCAmelCase = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            _UpperCAmelCase = model.forward(**snake_case_ )
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(snake_case_ , snake_case_ ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(snake_case_ , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class A_ ( lowerCAmelCase_ ):
    # GPT-2-XL variant of the quantization suite: overrides the checkpoint name and
    # the expected fp16/quantized memory-footprint ratio.
    # NOTE(review): both attributes are bound to the same name `_lowerCamelCase`,
    # so the first is shadowed — looks garbled, verify.
    _lowerCamelCase : int = """gpt2-xl"""
    _lowerCamelCase : str = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 119 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Minimal stand-in so module-level references to ``Image.open`` resolve
        when PIL is not installed (the vision-gated tests are skipped anyway)."""

        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Tests for the object-detection pipeline, from a tiny random DETR checkpoint
    up to the real ``facebook/detr-resnet-50`` model and a LayoutLMv3 variant."""

    # Restrict the shared pipeline-test machinery to object-detection models.
    __SCREAMING_SNAKE_CASE :List[str] = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def snake_case__ ( self , model , tokenizer , processor ):
        # Build the pipeline under test and one sample input for the shared mixin.
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def snake_case__ ( self , object_detector , examples ):
        # threshold=0.0 so every candidate box is returned; we only check the shape
        # of each detection dict, not specific values.
        outputs = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )

        self.assertGreater(len(outputs ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object , {
                    '''score''': ANY(float ),
                    '''label''': ANY(str ),
                    '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                } , )

        import datasets

        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        batch = [
            Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
            '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            # RGBA
            dataset[0]['''file'''],
            # LA
            dataset[1]['''file'''],
            # L
            dataset[2]['''file'''],
        ]
        batch_outputs = object_detector(batch , threshold=0.0 )

        self.assertEqual(len(batch ) , len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object , {
                        '''score''': ANY(float ),
                        '''label''': ANY(str ),
                        '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                    } , )

    @require_tf
    @unittest.skip('''Object detection not implemented in TF''' )
    def snake_case__ ( self ):
        pass

    @require_torch
    def snake_case__ ( self ):
        # Tiny random checkpoint: fast sanity check of single and batched calls.
        model_id = '''hf-internal-testing/tiny-detr-mobilenetsv3'''

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )

        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                {'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
            ] , )

        outputs = object_detector(
            [
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            ] , threshold=0.0 , )

        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                    {'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                ],
                [
                    {'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                    {'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                ],
            ] , )

    @require_torch
    @slow
    def snake_case__ ( self ):
        # Real DETR checkpoint, pipeline built by hand from model + feature extractor.
        model_id = '''facebook/detr-resnet-50'''

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                {'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                {'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                {'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                {'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
            ] , )

        outputs = object_detector(
            [
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            ] )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                    {'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                    {'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                    {'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                    {'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
                ],
                [
                    {'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                    {'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                    {'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                    {'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                    {'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
                ],
            ] , )

    @require_torch
    @slow
    def snake_case__ ( self ):
        # Same checkpoint via the `pipeline(...)` factory.
        model_id = '''facebook/detr-resnet-50'''

        object_detector = pipeline('''object-detection''' , model=model_id )

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                {'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                {'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                {'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                {'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
            ] , )

        outputs = object_detector(
            [
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            ] )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                    {'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                    {'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                    {'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                    {'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
                ],
                [
                    {'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                    {'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                    {'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                    {'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                    {'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
                ],
            ] , )

    @require_torch
    @slow
    def snake_case__ ( self ):
        # A high threshold should keep only the two confident cat detections.
        threshold = 0.9_985
        model_id = '''facebook/detr-resnet-50'''

        object_detector = pipeline('''object-detection''' , model=model_id )

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                {'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
            ] , )

    @require_torch
    @require_pytesseract
    @slow
    def snake_case__ ( self ):
        # LayoutLMv3-based detector on a document image (requires pytesseract OCR).
        model_id = '''Narsil/layoutlmv3-finetuned-funsd'''
        threshold = 0.9_993

        object_detector = pipeline('''object-detection''' , model=model_id , threshold=threshold )

        outputs = object_detector(
            '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
                {'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
            ] , )
| 432 |
'''simple docstring'''
def UpperCamelCase ( input_str: str ) -> str:
    """Return *input_str* with the order of its whitespace-separated words reversed.

    Surrounding/repeated whitespace collapses to single spaces.

    >>> UpperCamelCase("I love Python")
    'Python love I'
    >>> UpperCamelCase("")
    ''
    """
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 432 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A = TypeVar("""T""")
A = TypeVar("""U""")
class DoubleLinkedListNode ( Generic[T, U] ):
    """Node of a doubly linked list, holding a key/value pair.

    ``next``/``prev`` start as ``None`` and are wired up by the owning list.
    """

    def __init__( self , key : T | None , val : U | None ) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__( self ) -> str:
        return (
            f'''Node: key: {self.key}, val: {self.val}, '''
            f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )
class DoubleLinkedList ( Generic[T, U] ):
    """Doubly linked list with sentinel head/rear nodes.

    Real entries live strictly between the sentinels; ``add`` appends just before
    the rear, so list order is least- to most-recently-used.
    """

    def __init__( self ) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        # Wire the two sentinels to each other: the empty list.
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__( self ) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )

    def add( self , node : DoubleLinkedListNode[T, U] ) -> None:
        """Insert *node* just before the rear sentinel (most-recently-used slot)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove( self , node : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
        """Unlink *node* and return it; return ``None`` if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache ( Generic[T, U] ):
    """LRU cache backed by a dict plus a doubly linked list (for recency order).

    Also usable as a memoizing decorator via :meth:`decorator`, which keys the
    cache on the decorated function's first positional argument.
    """

    # Maps each decorated function to its dedicated cache instance.
    decorator_function_to_instance_map: dict = {}

    def __init__( self , capacity : int ) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__( self ) -> str:
        return (
            f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
            f'''capacity={self.capacity}, current size={self.num_keys})'''
        )

    def __contains__( self , key : T ) -> bool:
        return key in self.cache

    def get( self , key : T ) -> U | None:
        """Return the value for *key* (marking it most recently used), or None on a miss."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None

    def put( self , key : T , value : U ) -> None:
        """Insert or update *key*; evicts the least recently used entry at capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node ) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )

    @classmethod
    def decorator( cls , size : int = 128 ):
        """Decorator factory: memoize *func* on its first positional argument."""

        def cache_decorator_inner(func ):
            def cache_decorator_wrapper(*args ):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = cls(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper , """cache_info""" , cache_info )  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 711 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def load_weights( checkpoint , hf_model , config ):
    """Copy generator weights from the original *checkpoint* dict into *hf_model*.

    Weight norm is applied before copying (so ``weight_g``/``weight_v`` exist) and
    removed again afterwards. Attribute names (``conv_pre``/``upsampler``/
    ``resblocks``/``conv_post``) follow the HF SpeechT5HifiGan layout.
    """
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]

    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[f'''upsamples.{i}.1.bias''']

    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']

    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    """Convert an original HiFi-GAN vocoder checkpoint to a HF SpeechT5HifiGan model.

    Loads the generator weights from *checkpoint_path*, the feature mean/scale
    statistics from *stats_path* (a ``.npy`` with rows [mean, scale]), saves the
    converted model to *pytorch_dump_folder_path*, and optionally pushes it to the
    hub under *repo_id*.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config )

    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )

    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()

    model.save_pretrained(pytorch_dump_folder_path )

    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    # NOTE(review): the parser is bound to `_UpperCAmelCase` but used as `parser`
    # below, and the parsed namespace is read as `args` — these bindings look
    # garbled; verify against the original script.
    _UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    _UpperCAmelCase = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 36 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowercase_ :
    # NOTE(review): obfuscated CTRL test fixture.  The suite below instantiates
    # it via the name ``CTRLModelTester``, which this class no longer carries.
    # Every signature repeats the parameter name ``__A`` (a SyntaxError in
    # Python), every method is named ``_snake_case`` (later defs clobber earlier
    # ones), and each local rebinds ``SCREAMING_SNAKE_CASE_`` so the names the
    # bodies read (``parent``, ``input_ids``, ``result``, ...) are undefined.
    # The distinct original identifiers must be restored before this can run.
    def __init__( self , __A , __A=14 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=3 , __A=4 , __A=None , ) -> List[Any]:
        # Tiny-model hyper-parameters; the assignment order suggests the original
        # parameter list (parent, batch_size, seq_length, is_training, ...).
        SCREAMING_SNAKE_CASE_ : int =parent
        SCREAMING_SNAKE_CASE_ : Optional[int] =batch_size
        SCREAMING_SNAKE_CASE_ : Tuple =seq_length
        SCREAMING_SNAKE_CASE_ : Tuple =is_training
        SCREAMING_SNAKE_CASE_ : Optional[Any] =use_token_type_ids
        SCREAMING_SNAKE_CASE_ : Any =use_input_mask
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =use_labels
        SCREAMING_SNAKE_CASE_ : Optional[int] =use_mc_token_ids
        SCREAMING_SNAKE_CASE_ : List[Any] =vocab_size
        SCREAMING_SNAKE_CASE_ : Optional[Any] =hidden_size
        SCREAMING_SNAKE_CASE_ : Optional[Any] =num_hidden_layers
        SCREAMING_SNAKE_CASE_ : List[Any] =num_attention_heads
        SCREAMING_SNAKE_CASE_ : int =intermediate_size
        SCREAMING_SNAKE_CASE_ : Optional[int] =hidden_act
        SCREAMING_SNAKE_CASE_ : Tuple =hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ : Tuple =attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ : List[Any] =max_position_embeddings
        SCREAMING_SNAKE_CASE_ : Optional[int] =type_vocab_size
        SCREAMING_SNAKE_CASE_ : Tuple =type_sequence_label_size
        SCREAMING_SNAKE_CASE_ : int =initializer_range
        SCREAMING_SNAKE_CASE_ : str =num_labels
        SCREAMING_SNAKE_CASE_ : Tuple =num_choices
        SCREAMING_SNAKE_CASE_ : Any =scope
        # pad_token_id: last vocab entry is used as padding.
        SCREAMING_SNAKE_CASE_ : Tuple =self.vocab_size - 1

    def _snake_case ( self ) -> str:
        # Presumably ``prepare_config_and_inputs``: random ids/masks/labels + config.
        SCREAMING_SNAKE_CASE_ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ : Optional[int] =None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE_ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_ : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =None
        if self.use_mc_token_ids:
            SCREAMING_SNAKE_CASE_ : int =ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        SCREAMING_SNAKE_CASE_ : str =None
        SCREAMING_SNAKE_CASE_ : List[Any] =None
        SCREAMING_SNAKE_CASE_ : Tuple =None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE_ : Optional[int] =ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_config()
        # Per-layer, per-head mask of 0/1 values.
        SCREAMING_SNAKE_CASE_ : Dict =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def _snake_case ( self ) -> Any:
        # Presumably ``get_config``: a tiny CTRLConfig mirroring the fields above.
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )

    def _snake_case ( self , __A , __A , __A , __A , __A , *__A ) -> Union[str, Any]:
        # Presumably ``create_and_check_ctrl_model``: forward passes + shape checks.
        SCREAMING_SNAKE_CASE_ : List[str] =CTRLModel(config=__A )
        model.to(__A )
        model.eval()
        model(__A , token_type_ids=__A , head_mask=__A )
        model(__A , token_type_ids=__A )
        SCREAMING_SNAKE_CASE_ : List[str] =model(__A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )

    def _snake_case ( self , __A , __A , __A , __A , __A , *__A ) -> Any:
        # Presumably ``create_and_check_lm_head_model``: loss/logits shape checks.
        SCREAMING_SNAKE_CASE_ : Optional[Any] =CTRLLMHeadModel(__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE_ : Any =model(__A , token_type_ids=__A , labels=__A )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _snake_case ( self ) -> Any:
        # Presumably ``prepare_config_and_inputs_for_common``: repackages the
        # tuple above into the (config, inputs_dict) pair the mixins expect.
        SCREAMING_SNAKE_CASE_ : Optional[Any] =self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) ,
        ) : List[str] =config_and_inputs
        SCREAMING_SNAKE_CASE_ : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict

    def _snake_case ( self , __A , __A , __A , __A , *__A ) -> Any:
        # Presumably ``create_and_check_for_sequence_classification``.
        SCREAMING_SNAKE_CASE_ : Optional[int] =self.num_labels
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =CTRLForSequenceClassification(__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE_ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        SCREAMING_SNAKE_CASE_ : int =model(__A , token_type_ids=__A , labels=__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowercase_ ( A , A , A , unittest.TestCase ):
    # NOTE(review): obfuscated CTRL model test suite.  The three ``A`` bases are
    # the obfuscated ModelTesterMixin/GenerationTesterMixin/PipelineTesterMixin
    # names imported above; all six class attributes share the name
    # ``__lowerCamelCase`` (later bindings clobber earlier ones — originally
    # ``all_model_classes``, ``all_generative_model_classes``,
    # ``pipeline_model_mapping``, ...); several signatures repeat ``__A``, which
    # is invalid Python.  Distinct names must be restored before this can run.
    __lowerCamelCase = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    __lowerCamelCase = (CTRLLMHeadModel,) if is_torch_available() else ()
    __lowerCamelCase = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __lowerCamelCase = True
    __lowerCamelCase = False
    __lowerCamelCase = False

    def _snake_case ( self , __A , __A , __A , __A , __A ) -> List[str]:
        # Presumably ``is_pipeline_test_to_skip``: decides which pipeline tests to bypass.
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def _snake_case ( self ) -> Dict:
        # setUp: build the model tester and a config tester for CTRLConfig.
        SCREAMING_SNAKE_CASE_ : Tuple =CTRLModelTester(self )
        SCREAMING_SNAKE_CASE_ : Optional[int] =ConfigTester(self , config_class=__A , n_embd=37 )

    def _snake_case ( self ) -> int:
        # tearDown: free GPU memory between tests.
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self ) -> Dict:
        # Run the shared configuration sanity checks.
        self.config_tester.run_common_tests()

    def _snake_case ( self ) -> Tuple:
        # Exercise the bare CTRLModel via the fixture.
        SCREAMING_SNAKE_CASE_ : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*__A )

    def _snake_case ( self ) -> Optional[int]:
        # Exercise the LM-head variant via the fixture.
        SCREAMING_SNAKE_CASE_ : List[str] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*__A )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def _snake_case ( self ) -> Union[str, Any]:
        pass

    @slow
    def _snake_case ( self ) -> List[Any]:
        # Smoke-test loading the first pretrained checkpoint from the archive list.
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ : List[str] =CTRLModel.from_pretrained(__A )
            self.assertIsNotNone(__A )

    @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
    def _snake_case ( self ) -> Any:
        pass
@require_torch
class lowercase_ ( unittest.TestCase ):
    # NOTE(review): obfuscated CTRL integration test — greedy generation with the
    # full pretrained ``ctrl`` checkpoint.  Locals again clobber one throwaway
    # name, so ``model``, ``input_ids`` and ``output_ids`` are undefined at their
    # use sites; the original identifiers must be restored before this can run.
    def _snake_case ( self ) -> Any:
        # tearDown: release GPU memory after each test.
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def _snake_case ( self ) -> Optional[int]:
        # Greedy generation from the prompt "Legal the president is" must
        # reproduce the expected token ids exactly.
        SCREAMING_SNAKE_CASE_ : Dict =CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(__A )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.tensor(
            [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=__A ) # Legal the president is
        SCREAMING_SNAKE_CASE_ : Dict =[
            11_859,
            0,
            1_611,
            8,
            5,
            150,
            26_449,
            2,
            19,
            348,
            469,
            3,
            2_595,
            48,
            20_740,
            246_533,
            246_533,
            19,
            30,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        SCREAMING_SNAKE_CASE_ : List[str] =model.generate(__A , do_sample=__A )
        self.assertListEqual(output_ids[0].tolist() , __A )
| 443 |
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int | float | str ) -> tuple[int, int]:
try:
SCREAMING_SNAKE_CASE_ : int =float(UpperCAmelCase_ )
except ValueError:
raise ValueError('''Please enter a valid number''' )
SCREAMING_SNAKE_CASE_ : Any =decimal - int(UpperCAmelCase_ )
if fractional_part == 0:
return int(UpperCAmelCase_ ), 1
else:
SCREAMING_SNAKE_CASE_ : Any =len(str(UpperCAmelCase_ ).split('''.''' )[1] )
SCREAMING_SNAKE_CASE_ : str =int(decimal * (1_0**number_of_frac_digits) )
SCREAMING_SNAKE_CASE_ : Any =1_0**number_of_frac_digits
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple =denominator, numerator
while True:
SCREAMING_SNAKE_CASE_ : Any =dividend % divisor
if remainder == 0:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] =divisor, remainder
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] =numerator / divisor, denominator / divisor
return int(UpperCAmelCase_ ), int(UpperCAmelCase_ )
if __name__ == "__main__":
    # Demo: print a few representative conversions (the last one is expected to
    # raise ValueError).
    # NOTE(review): these calls reference ``decimal_to_fraction`` while the
    # definition above was (mis)named ``SCREAMING_SNAKE_CASE_`` by the
    # obfuscation — the names must agree for this to run.
    print(F"{decimal_to_fraction(2) = }")
    print(F"{decimal_to_fraction(89.0) = }")
    print(F"{decimal_to_fraction('67') = }")
    print(F"{decimal_to_fraction('45.0') = }")
    print(F"{decimal_to_fraction(1.5) = }")
    print(F"{decimal_to_fraction('6.25') = }")
    print(F"{decimal_to_fraction('78td') = }")
| 443 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase(ProcessorMixin):
    r"""
    Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a
    single processor.

    The image processor resizes/normalises document images (optionally running
    OCR to extract words and boxes); the tokenizer then turns words and boxes
    into token-level ``input_ids``, ``attention_mask`` and ``bbox``.

    (Restored names: the obfuscated source repeated one parameter name per
    signature — invalid Python — and named every method ``__magic_name__``,
    which would have clobbered ``get_overflowing_images`` that ``__call__``
    invokes.  Base class and attribute names are restored from the imports and
    from ``self.image_processor_class`` used below.)

    Args:
        image_processor (`LayoutLMv3ImageProcessor`): required.
        tokenizer (`LayoutLMv3Tokenizer` or `LayoutLMv3TokenizerFast`): required.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        # Fall back to the deprecated argument when the new one is absent.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor, then the tokenizer, and merge their outputs."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''')

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''')

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features['''boxes'''],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop('''pixel_values''')
        if return_overflowing_tokens is True:
            # Duplicate images so each overflow chunk keeps its source image.
            images = self.get_overflowing_images(images, encoded_inputs['''overflow_to_sample_mapping'''])
        encoded_inputs['''pixel_values'''] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f''' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}''')

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward everything to ``PreTrainedTokenizer.batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to ``PreTrainedTokenizer.decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Names of the tensors this processor produces.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''',
            FutureWarning,
        )
        return self.image_processor
| 260 |
def combination_util(arr, n, r, index, data, i):
    """Recursively print every r-sized combination of ``arr[0:n]``.

    (Restored names: the obfuscated def repeated one parameter name six times —
    invalid Python — and was no longer called ``combination_util``, breaking
    its own recursive calls.)

    Args:
        arr: source sequence.
        n: number of usable elements in ``arr``.
        r: combination size.
        index: next free slot in ``data`` (callers start with 0).
        data: scratch list of length >= r holding the current partial combination.
        i: index of the next candidate element in ``arr``.
    """
    if index == r:
        for j in range(r):
            print(data[j], end=''' ''')
        print(''' ''')
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print every r-sized combination of ``arr[0:n]``, one per line.

    (Restored name: the caller below uses ``print_combination``, which the
    obfuscated def no longer carried; its parameters also all shared one name.)
    """
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    # NOTE(review): the list is bound to ``__lowerCAmelCase`` but the call below
    # reads ``arr`` — the obfuscation broke the name; they must match to run.
    __lowerCAmelCase : Tuple =[1_0, 2_0, 3_0, 4_0, 5_0]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 260 | 1 |
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c, used
# by the Casimir-effect solver below.  (Restored names: the obfuscated source
# bound BOTH values to the same identifier, clobbering the first, while the
# solver reads REDUCED_PLANCK_CONSTANT and SPEED_OF_LIGHT.)
REDUCED_PLANCK_CONSTANT = 1.0_54_57_18_17e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
# Preserve the obfuscated module name with the last value it held.
UpperCAmelCase_ = SPEED_OF_LIGHT
def __magic_name__(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the Casimir equation ``F = (ħ·c·π²·A) / (240·d⁴)`` for one unknown.

    (Restored names: the obfuscated source repeated one parameter name three
    times — invalid Python — and clobbered each local.)

    Exactly one of *force*, *area*, *distance* must be 0; that quantity is
    computed from the other two and returned in a single-entry dict keyed
    ``"force"``, ``"area"`` or ``"distance"``.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("""One and only one argument must be 0""")
    if force < 0:
        raise ValueError("""Magnitude of force can not be negative""")
    if distance < 0:
        raise ValueError("""Distance can not be negative""")
    if area < 0:
        raise ValueError("""Area can not be negative""")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    # Unreachable given the count-of-zero check above, kept as a safety net.
    raise ValueError("""One and only one argument must be 0""")
# Run doctest
if __name__ == "__main__":
    # Execute any doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    '''simple docstring'''
    # NOTE(review): obfuscated fast StableUnCLIP pipeline test.  The three
    # ``lowerCamelCase__`` bases are the tester mixins imported above; the five
    # ``__a`` class attributes clobber one another (originally distinct names
    # such as ``pipeline_class``, ``params``, ``batch_params``, ...), and each
    # local rebinds ``lowercase_`` so the names the bodies read
    # (``embedder_hidden_size``, ``components``, ``generator``, ...) are
    # undefined.  Distinct originals must be restored before this can run.
    __a : Optional[int] = StableUnCLIPPipeline
    __a : int = TEXT_TO_IMAGE_PARAMS
    __a : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    __a : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
    __a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    __a : Tuple = False

    def snake_case__ ( self ) -> Optional[Any]:
        """simple docstring"""
        # Presumably ``get_dummy_components``: builds a tiny prior stack and a
        # tiny denoising stack, all seeded for determinism.
        lowercase_ : int = 32
        lowercase_ : Tuple = embedder_hidden_size

        # prior components

        torch.manual_seed(0 )
        lowercase_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        torch.manual_seed(0 )
        lowercase_ : Dict = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=snake_case__, projection_dim=snake_case__, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, ) )

        torch.manual_seed(0 )
        lowercase_ : Tuple = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=snake_case__, num_layers=1, )

        torch.manual_seed(0 )
        lowercase_ : Optional[Any] = DDPMScheduler(
            variance_type="""fixed_small_log""", prediction_type="""sample""", num_train_timesteps=10_00, clip_sample=snake_case__, clip_sample_range=5.0, beta_schedule="""squaredcos_cap_v2""", )

        # regular denoising components

        torch.manual_seed(0 )
        lowercase_ : Union[str, Any] = StableUnCLIPImageNormalizer(embedding_dim=snake_case__ )
        lowercase_ : str = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )

        torch.manual_seed(0 )
        lowercase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        torch.manual_seed(0 )
        lowercase_ : List[Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=snake_case__, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, ) )

        torch.manual_seed(0 )
        lowercase_ : Optional[int] = UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="""projection""", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=snake_case__, layers_per_block=1, upcast_attention=snake_case__, use_linear_projection=snake_case__, )

        torch.manual_seed(0 )
        lowercase_ : Dict = DDIMScheduler(
            beta_schedule="""scaled_linear""", beta_start=0.00085, beta_end=0.012, prediction_type="""v_prediction""", set_alpha_to_one=snake_case__, steps_offset=1, )

        torch.manual_seed(0 )
        lowercase_ : str = AutoencoderKL()

        lowercase_ : str = {
            # prior components
            """prior_tokenizer""": prior_tokenizer,
            """prior_text_encoder""": prior_text_encoder,
            """prior""": prior,
            """prior_scheduler""": prior_scheduler,
            # image noising components
            """image_normalizer""": image_normalizer,
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder,
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
        }

        return components

    def snake_case__ ( self, snake_case__, snake_case__=0 ) -> str:
        """simple docstring"""
        # Presumably ``get_dummy_inputs``: a seeded generator plus tiny
        # prompt/step settings.  (Note the duplicate ``snake_case__`` parameter
        # names — invalid Python until restored.)
        if str(snake_case__ ).startswith("""mps""" ):
            lowercase_ : Tuple = torch.manual_seed(snake_case__ )
        else:
            lowercase_ : int = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
        lowercase_ : Dict = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """prior_num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def snake_case__ ( self ) -> List[Any]:
        """simple docstring"""
        # Attention-slicing equivalence check (exact match only on CPU).
        lowercase_ : int = torch_device == """cpu"""

        self._test_attention_slicing_forward_pass(test_max_difference=snake_case__ )

    def snake_case__ ( self ) -> Any:
        """simple docstring"""
        # Batched-vs-single inference equivalence check.
        lowercase_ : List[str] = torch_device in ["""cpu""", """mps"""]

        self._test_inference_batch_single_identical(test_max_difference=snake_case__ )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    '''simple docstring'''
    # NOTE(review): obfuscated slow StableUnCLIP integration tests (GPU only).
    # Locals clobber ``lowercase_`` so ``pipe``, ``output``, ``image`` and
    # ``mem_bytes`` are undefined at their use sites until restored.
    def snake_case__ ( self ) -> Any:
        """simple docstring"""
        # tearDown: clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case__ ( self ) -> List[str]:
        """simple docstring"""
        # Full-pipeline run compared against a reference image by mean pixel diff.
        lowercase_ : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )

        lowercase_ : List[Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""", torch_dtype=torch.floataa )
        pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        lowercase_ : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
        lowercase_ : Dict = pipe("""anime turle""", generator=snake_case__, output_type="""np""" )

        lowercase_ : str = output.images[0]

        assert image.shape == (7_68, 7_68, 3)

        assert_mean_pixel_difference(snake_case__, snake_case__ )

    def snake_case__ ( self ) -> Dict:
        """simple docstring"""
        # Memory-savings smoke test: peak allocation must stay below 7 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        lowercase_ : str = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""", torch_dtype=torch.floataa )
        lowercase_ : Optional[int] = pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        lowercase_ : List[Any] = pipe(
            """anime turtle""", prior_num_inference_steps=2, num_inference_steps=2, output_type="""np""", )

        lowercase_ : Optional[int] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class lowerCamelCase(PretrainedConfig):
    """Configuration for a timm-backed backbone wrapper.

    (Restored names: the obfuscated def repeated one parameter name — invalid
    Python — while the body read ``backbone``, ``num_channels``, etc.; the base
    class is ``PretrainedConfig``, the only config class imported above.)

    Args:
        backbone: name of the timm model to load (default ``None``).
        num_channels: number of input image channels (default 3).
        features_only: whether timm should expose feature maps only (default True).
        use_pretrained_backbone: load timm pretrained weights (default True).
        out_indices: feature-map indices to return; defaults to ``(-1,)``
            (last stage only) when not given.
    """

    model_type = 'timm_backbone'

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # Marks this config as timm-backed for downstream dispatch.
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 711 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """Build the ``YolosConfig`` matching a named original YOLOS variant.

    (Restored names: the obfuscated source read ``yolos_name`` while the
    parameter carried another name, and bound every config field to one
    clobbered throwaway identifier so nothing was actually stored.  The caller
    below uses ``get_yolos_config``.)
    """
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    # COCO detection label mapping (91 classes), fetched from the Hub.
    config.num_labels = 91
    repo_id = """huggingface/label-files"""
    filename = """coco-detection-id2label.json"""
    with open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r""") as f:
        idalabel = json.load(f)
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config


# Backward-compatible alias for the obfuscated name this function carried.
a_ = get_yolos_config
def read_in_q_k_v(state_dict: dict, config, base_model: bool = False) -> None:
    """Split each fused ``qkv`` projection into separate query/key/value tensors.

    Mutates *state_dict* in place: pops ``blocks.{i}.attn.qkv.{weight,bias}``
    and writes the three slices back under HF-style keys.

    (Restored behaviour: the obfuscated source computed the slices but assigned
    them to clobbered throwaway names, discarding them.  The ``vit.encoder.
    layer`` key prefix is reconstructed from the ``backbone``→``vit`` /
    ``blocks``→``encoder.layer`` mappings in ``rename_key`` below — TODO
    confirm against the upstream conversion script.)
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Map an original-YOLOS checkpoint key to its HF model key.

    (Restored names: the obfuscated source bound each ``replace`` result to a
    clobbered throwaway identifier instead of ``name``, so no rename stuck.)

    Order matters: e.g. ``attn.proj`` must be handled before the bare ``attn``
    substring, and ``mid_pos_embed`` before ``pos_embed``.
    """
    if "backbone" in name:
        name = name.replace("""backbone""", """vit""")
    if "cls_token" in name:
        name = name.replace("""cls_token""", """embeddings.cls_token""")
    if "det_token" in name:
        name = name.replace("""det_token""", """embeddings.detection_tokens""")
    if "mid_pos_embed" in name:
        name = name.replace("""mid_pos_embed""", """encoder.mid_position_embeddings""")
    if "pos_embed" in name:
        name = name.replace("""pos_embed""", """embeddings.position_embeddings""")
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""")
    if "blocks" in name:
        name = name.replace("""blocks""", """encoder.layer""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "attn" in name:
        name = name.replace("""attn""", """attention.self""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "class_embed" in name:
        name = name.replace("""class_embed""", """class_labels_classifier""")
    if "bbox_embed" in name:
        name = name.replace("""bbox_embed""", """bbox_predictor""")
    if "vit.norm" in name:
        name = name.replace("""vit.norm""", """vit.layernorm""")

    return name
def convert_state_dict(orig_state_dict: dict, model) -> dict:
    """Rewrite an original YOLOS state dict in place into the HF layout.

    Fused ``qkv`` projections are split into query/key/value tensors sized by
    the model's ``all_head_size``; every other key is remapped via
    ``rename_key``.

    (Restored behaviour: the obfuscated source assigned the split slices and
    the renamed entries to clobbered throwaway names, discarding them instead
    of writing them back into *orig_state_dict*.)
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(""".""")
            # Keys look like ``backbone.blocks.{layer}.attn.qkv.{weight|bias}``.
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats test image used for conversion checks.

    Returns a ``PIL.Image.Image`` (the obfuscated return annotation claimed
    ``torch.Tensor``, which is wrong — ``Image.open`` yields a PIL image).
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Convert an original YOLOS checkpoint to the HF format.

    Loads the original weights, remaps them onto ``YolosForObjectDetection``,
    verifies the outputs on a reference image against hard-coded slices,
    saves model + image processor, and optionally pushes both to the hub.

    Args:
        yolos_name: one of the known variants ("yolos_ti", "yolos_s_200_pre", ...).
        checkpoint_path: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output directory for the converted artifacts.
        push_to_hub: whether to upload to the ``hustvl`` organization.

    Raises:
        ValueError: if ``yolos_name`` is not a known variant.
    """
    config = get_yolos_config(yolos_name)

    # Load original state_dict.
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # Load HF model and remap the weights onto it.
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor.
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--yolos_name',
        default='yolos_s_200_pre',
        type=str,
        help=(
            'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
            ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
        ),
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 164 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# NOTE(review): the resolve host below looks like a mangled mirror of
# huggingface.co — confirm before shipping; preserved byte-for-byte here.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
    },
    '''tokenizer_file''': {
        '''unc-nlp/lxmert-base-uncased''': (
            '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum sequence length each pretrained checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''unc-nlp/lxmert-base-uncased''': 5_1_2,
}

# Per-checkpoint tokenizer construction defaults.
PRETRAINED_INIT_CONFIGURATION = {
    '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class snake_case(PreTrainedTokenizerFast):
    """Fast LXMERT tokenizer (backed by HuggingFace *tokenizers*), WordPiece based.

    NOTE(review): the original block had all three methods named
    ``UpperCAmelCase`` (shadowing each other) and duplicate parameter names
    (a SyntaxError); methods are restored to the names the
    ``PreTrainedTokenizerFast`` contract requires.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer if saved state disagrees with the
        # requested casing / accent / CJK handling options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for the first sequence (+specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 392 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it exports.
# The original block rebound everything to one throwaway name, so
# ``_import_structure`` was never built and the lazy module was never
# installed into ``sys.modules``; both are restored here.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 546 | 0 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds tiny LiLT configs/inputs and runs shape checks for the tests below.

    NOTE(review): restored from a garbled block whose parameters were all
    duplicates (SyntaxError) and whose methods shared one name; the sibling
    test class already references ``LiltModelTester`` and these method names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Create random ids, bboxes, masks and labels plus a tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: coordinates must satisfy x0 <= x1, y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        """Return a small LiltConfig matching the tester's dimensions."""
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Forward the base model with progressively fewer inputs; check shapes."""
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Forward the token-classification head with labels; check logits shape."""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Forward the QA head with start/end positions; check logits shapes."""
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the common (config, inputs_dict) form."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test harness for the LiLT model family."""

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the two boolean flags below were anonymous in the garbled
    # original; restored as the usual fx/pruning switches — confirm upstream.
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped wholesale for LiLT (needs bbox inputs).
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """End-to-end check against a released LiLT checkpoint."""

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 7_68])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 708 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __a :
def __init__( self : Optional[Any] , lowercase__ : int) ->None:
"""simple docstring"""
_lowercase = num_of_nodes
_lowercase = []
_lowercase = {}
def _UpperCAmelCase ( self : Optional[int] , lowercase__ : int , lowercase__ : int , lowercase__ : int) ->None:
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight])
def _UpperCAmelCase ( self : Any , lowercase__ : int) ->int:
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def _UpperCAmelCase ( self : List[str] , lowercase__ : int) ->None:
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowercase = self.find_component(lowercase__)
def _UpperCAmelCase ( self : Dict , lowercase__ : list[int] , lowercase__ : int , lowercase__ : int) ->None:
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
_lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowercase__)
elif component_size[u_node] >= component_size[v_node]:
_lowercase = self.find_component(lowercase__)
component_size[u_node] += component_size[v_node]
self.set_component(lowercase__)
def _UpperCAmelCase ( self : Optional[int]) ->None:
"""simple docstring"""
_lowercase = []
_lowercase = 0
_lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
_lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowercase , _lowercase , _lowercase = edge
_lowercase = self.m_component[u]
_lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowercase__ , lowercase__):
_lowercase , _lowercase , _lowercase = edge
_lowercase = self.m_component[u]
_lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowercase__ , lowercase__ , lowercase__)
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
num_of_components -= 1
_lowercase = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _SCREAMING_SNAKE_CASE ( ):
    # Deliberate no-op placeholder; kept so the module exposes a callable.
    pass


if __name__ == "__main__":
    import doctest

    # Run any doctests in this module (none are defined at present).
    doctest.testmod()
| 572 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among ``env_keys``.

    Each environment variable is parsed with ``int``; missing variables
    count as -1 and are skipped. Falls back to ``default`` when none match.
    """
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    """Read boolean flag ``key`` from the environment (``default`` if unset)."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    """Read string choice ``key`` from the environment (``default`` if unset)."""
    value = os.environ.get(key, str(default))
    return value
| 7 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Step ``scheduler`` ``num_steps`` times and return the observed LRs."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like ``unwrap_schedule`` but save/reload the scheduler state mid-run.

    At the halfway step the scheduler state dict is round-tripped through a
    temporary file, so the returned LRs also verify checkpoint stability.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    """Convergence smoke tests for the AdamW and Adafactor optimizers."""

    def assertListAlmostEqual(self, list1, list2, tol):
        """Element-wise almost-equal comparison with absolute tolerance."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    """Checks each LR schedule against its expected learning-rate trajectory."""

    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        """Element-wise almost-equal comparison with absolute tolerance."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> Tuple:
_UpperCamelCase : Any = fn
def __call__( self : Tuple , *__a : Optional[Any] , **__a : str ) -> Any:
return self.fn(*__a , **__a )
@classmethod
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Any ) -> Tuple:
_UpperCamelCase : Any = list(map(self , scheduler.lr_lambdas ) )
| 624 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase__ =logging.get_logger(__name__)
class lowerCamelCase__ ( _a ):
a : str = ["""input_features""", """is_longer"""]
def __init__( self : Any , A_ : Optional[int]=6_4 , A_ : Optional[Any]=4_8_0_0_0 , A_ : Any=4_8_0 , A_ : Optional[Any]=1_0 , A_ : Optional[Any]=1_0_2_4 , A_ : List[str]=0.0 , A_ : str=False , A_ : float = 0 , A_ : float = 1_4_0_0_0 , A_ : int = None , A_ : str = "fusion" , A_ : str = "repeatpad" , **A_ : Dict , ):
'''simple docstring'''
super().__init__(
feature_size=A_ , sampling_rate=A_ , padding_value=A_ , return_attention_mask=A_ , **A_ , )
__lowercase = top_db
__lowercase = truncation
__lowercase = padding
__lowercase = fft_window_size
__lowercase = (fft_window_size >> 1) + 1
__lowercase = hop_length
__lowercase = max_length_s
__lowercase = max_length_s * sampling_rate
__lowercase = sampling_rate
__lowercase = frequency_min
__lowercase = frequency_max
__lowercase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A_ , min_frequency=A_ , max_frequency=A_ , sampling_rate=A_ , norm=A_ , mel_scale="""htk""" , )
__lowercase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A_ , min_frequency=A_ , max_frequency=A_ , sampling_rate=A_ , norm="""slaney""" , mel_scale="""slaney""" , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def SCREAMING_SNAKE_CASE_ ( self : str , A_ : np.array , A_ : Optional[np.array] = None ):
'''simple docstring'''
__lowercase = spectrogram(
A_ , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=A_ , log_mel="""dB""" , )
return log_mel_spectrogram.T
def SCREAMING_SNAKE_CASE_ ( self : Tuple , A_ : Any , A_ : Tuple , A_ : str ):
'''simple docstring'''
__lowercase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__lowercase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__lowercase = [0]
# randomly choose index for each part
__lowercase = np.random.choice(ranges[0] )
__lowercase = np.random.choice(ranges[1] )
__lowercase = np.random.choice(ranges[2] )
__lowercase = mel[idx_front : idx_front + chunk_frames, :]
__lowercase = mel[idx_middle : idx_middle + chunk_frames, :]
__lowercase = mel[idx_back : idx_back + chunk_frames, :]
__lowercase = torch.tensor(mel[None, None, :] )
__lowercase = torch.nn.functional.interpolate(
A_ , size=[chunk_frames, 6_4] , mode="""bilinear""" , align_corners=A_ )
__lowercase = mel_shrink[0][0].numpy()
__lowercase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def SCREAMING_SNAKE_CASE_ ( self : Any , A_ : np.array , A_ : List[str] , A_ : Dict , A_ : Optional[Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__lowercase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__lowercase = len(A_ ) - max_length
__lowercase = np.random.randint(0 , overflow + 1 )
__lowercase = waveform[idx : idx + max_length]
__lowercase = self._np_extract_fbank_features(A_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__lowercase = self._np_extract_fbank_features(A_ , self.mel_filters )
__lowercase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__lowercase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__lowercase = np.stack([mel, mel, mel, mel] , axis=0 )
__lowercase = False
else:
__lowercase = self._random_mel_fusion(A_ , A_ , A_ )
__lowercase = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
__lowercase = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__lowercase = int(max_length / len(A_ ) )
__lowercase = np.stack(np.tile(A_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__lowercase = int(max_length / len(A_ ) )
__lowercase = np.stack(np.tile(A_ , A_ ) )
__lowercase = np.pad(A_ , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
if truncation == "fusion":
__lowercase = self._np_extract_fbank_features(A_ , self.mel_filters )
__lowercase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__lowercase = self._np_extract_fbank_features(A_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] , A_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A_ : str = None , A_ : Optional[str] = None , A_ : Optional[int] = None , A_ : Optional[int] = None , A_ : Optional[Union[str, TensorType]] = None , **A_ : Any , ):
'''simple docstring'''
__lowercase = truncation if truncation is not None else self.truncation
__lowercase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__lowercase = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__lowercase = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase = [np.asarray(A_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
__lowercase = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase = [np.asarray(A_ )]
# convert to mel spectrogram, truncate and pad if needed.
__lowercase = [
self._get_input_mel(A_ , max_length if max_length else self.nb_max_samples , A_ , A_ )
for waveform in raw_speech
]
__lowercase = []
__lowercase = []
for mel, longer in padded_inputs:
input_mel.append(A_ )
is_longer.append(A_ )
if truncation == "fusion" and sum(A_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__lowercase = np.random.randint(0 , len(A_ ) )
__lowercase = True
if isinstance(input_mel[0] , A_ ):
__lowercase = [np.asarray(A_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__lowercase = [[longer] for longer in is_longer]
__lowercase = {"""input_features""": input_mel, """is_longer""": is_longer}
__lowercase = BatchFeature(A_ )
if return_tensors is not None:
__lowercase = input_features.convert_to_tensors(A_ )
return input_features
| 721 |
"""simple docstring"""
import inspect
import unittest
class lowerCamelCase__ ( unittest.TestCase ):
    """Sanity checks on diffusers' dependency bookkeeping."""

    def test_diffusers_import(self):
        """diffusers must be importable at all."""
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        """Every backend named by a dummy object must appear in the deps table."""
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            # Dummy placeholder classes record which optional backends they stand in for.
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Package names on PyPI use dashes, the backend flags use underscores.
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 442 | 0 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase_ = False
class _A ( unittest.TestCase ):
    """Training-equivalence tests: DDPM and DDIM `add_noise` must match step for step."""

    def get_model_optimizer(self, resolution=32):
        """Return a freshly seeded small UNet and an SGD optimizer over its parameters."""
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        """Train twice on identical batches — once noised via DDPM, once via DDIM —
        and check the noisy inputs and final predictions coincide."""
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        # clip_sample has no effect on add_noise; value kept from upstream test.
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence.

    The sequence is defined by a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1,
    i.e. (a(n-1) - 1) * a(n-1) + 1.

    Raises:
        AssertionError: if `number` is not an int.
        ValueError: if `number` < 1.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        # Recurse on the previous term; (prev - 1) * prev + 1 == prev^2 - prev + 1.
        previous = sylvester(number - 1)
        lower = previous - 1
        upper = previous
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# The original bound both the logger and the archive map to the same name,
# clobbering the logger that the config classes below call.
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder."""

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a composite InstructBlip config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former (a BERT-style, cross-attending encoder)."""

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # A cross-attention layer is inserted every `cross_attention_frequency` layers.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a composite InstructBlip config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """Composite configuration tying together vision, Q-Former and language-model configs."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Instantiate from already-built sub-config objects."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding each sub-config to its own dict."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 712 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# True when the installed torch predates 1.11 (older torch.onnx.export keyword set);
# read below under this exact name by the export helper.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`, creating parent directories as needed.

    Named `onnx_export` because that is how the conversion function below calls it.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Convert a `diffusers` StableDiffusionPipeline checkpoint to an ONNX pipeline.

    Exports the text encoder, UNet, VAE encoder/decoder and (optionally) the
    safety checker, then reassembles and round-trips the ONNX pipeline.

    Args:
        model_path: local directory or Hub id of the diffusers checkpoint.
        output_path: directory to write the ONNX pipeline to.
        opset: ONNX operator-set version.
        fp16: export in float16 (requires CUDA).

    Raises:
        ValueError: if `fp16` is requested without a CUDA device.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    # Round-trip load to verify the exported pipeline is usable.
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
    )
    parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--opset',
        default=14,
        type=int,
        help='The version of the ONNX operator set to use.',
    )
    parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 209 | 0 |
import os
import sys
import unittest
# Repository root, three levels up from this test file; read below and by the
# check_dummies path alignment, so it must carry this exact name.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit tests for the repo's `check_dummies` utility.

    NOTE(review): every method below carries the same mangled name
    `__UpperCAmelCase`, so later definitions shadow earlier ones and unittest
    discovers none of them (no `test_` prefix). Assertions reference an
    undefined `__snake_case` (presumably the value computed just above) and,
    in the second method, an undefined `objects`. Code left byte-identical
    pending a careful restore of the original names.
    """

    def __UpperCAmelCase ( self ):
        """Check `find_backend` on single, double and triple backend guards."""
        A_ = find_backend(''' if not is_torch_available():''' )
        # NOTE(review): `__snake_case` is undefined — likely meant to be the result above.
        self.assertEqual(__snake_case ,'''torch''' )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        A_ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
        self.assertEqual(__snake_case ,'''torch_and_transformers''' )
        # double_backend_with_underscore = find_backend(
        # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        A_ = find_backend(
            ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
        self.assertEqual(__snake_case ,'''torch_and_transformers_and_onnx''' )

    def __UpperCAmelCase ( self ):
        """Smoke-check the structure returned by `read_init`."""
        A_ = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        # NOTE(review): `__snake_case` / `objects` below are undefined — likely the `read_init()` result.
        self.assertIn('''torch''' ,__snake_case )
        self.assertIn('''torch_and_transformers''' ,__snake_case )
        self.assertIn('''flax_and_transformers''' ,__snake_case )
        self.assertIn('''torch_and_transformers_and_onnx''' ,__snake_case )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''UNet2DModel''' ,objects['''torch'''] )
        self.assertIn('''FlaxUNet2DConditionModel''' ,objects['''flax'''] )
        self.assertIn('''StableDiffusionPipeline''' ,objects['''torch_and_transformers'''] )
        self.assertIn('''FlaxStableDiffusionPipeline''' ,objects['''flax_and_transformers'''] )
        self.assertIn('''LMSDiscreteScheduler''' ,objects['''torch_and_scipy'''] )
        self.assertIn('''OnnxStableDiffusionPipeline''' ,objects['''torch_and_transformers_and_onnx'''] )

    def __UpperCAmelCase ( self ):
        """Check `create_dummy_object` output for a constant, a function and a class.

        NOTE(review): the expected triple-quoted strings appear to have lost
        their original leading indentation during mangling — compare against
        the real `create_dummy_object` output before trusting these literals.
        """
        A_ = create_dummy_object('''CONSTANT''' ,'''\'torch\'''' )
        self.assertEqual(__snake_case ,'''\nCONSTANT = None\n''' )
        A_ = create_dummy_object('''function''' ,'''\'torch\'''' )
        self.assertEqual(
            __snake_case ,'''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
        A_ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
        A_ = create_dummy_object('''FakeClass''' ,'''\'torch\'''' )
        self.assertEqual(__snake_case ,__snake_case )

    def __UpperCAmelCase ( self ):
        """Check `create_dummy_files` produces the expected autogenerated torch file.

        NOTE(review): `dummy_files` is undefined here — likely the result of
        the `create_dummy_files` call bound to `A_` just above.
        """
        A_ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
        A_ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] ,__snake_case )
| 188 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Inputs for the end-to-end equivalence check in the conversion function;
# SAMPLE_LANGUAGE is read below under this exact name.
SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'
def UpperCAmelCase_ ( _UpperCAmelCase :str , _UpperCAmelCase :str , _UpperCAmelCase :bool ) -> Optional[Any]:
    """Convert a fairseq X-MOD checkpoint to a Hugging Face Xmod model and save it.

    NOTE(review): this function is badly mangled — the signature repeats the
    parameter name `_UpperCAmelCase` three times (a SyntaxError), and nearly all
    assignment targets (model weights, locals such as `xmod`, `config`, `model`,
    `data_dir`) were collapsed to the placeholder `A_`, leaving later reads
    (`xmod`, `config`, `model`, `layer`, `xmod_layer`, ...) unbound. Code left
    byte-identical; restore against the upstream X-MOD conversion script.
    """
    A_ = Path('''data_bin''' )
    A_ = FairseqXmodModel.from_pretrained(
    model_name_or_path=str(Path(_UpperCAmelCase ).parent ) , checkpoint_file=Path(_UpperCAmelCase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_UpperCAmelCase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_UpperCAmelCase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
    xmod.eval() # disable dropout
    print(_UpperCAmelCase )
    A_ = xmod.model.encoder.sentence_encoder
    A_ = XmodConfig(
    vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        A_ = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , _UpperCAmelCase )
    A_ = XmodForSequenceClassification(_UpperCAmelCase ) if classification_head else XmodForMaskedLM(_UpperCAmelCase )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    A_ = xmod_sent_encoder.embed_tokens.weight
    A_ = xmod_sent_encoder.embed_positions.weight
    A_ = torch.zeros_like(
    model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    A_ = xmod_sent_encoder.layernorm_embedding.weight
    A_ = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        A_ = model.roberta.encoder.layer[i]
        A_ = xmod_sent_encoder.layers[i]
        # self attention
        A_ = layer.attention.self
        if not (
        xmod_layer.self_attn.k_proj.weight.data.shape
        == xmod_layer.self_attn.q_proj.weight.data.shape
        == xmod_layer.self_attn.v_proj.weight.data.shape
        == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''' )
        A_ = xmod_layer.self_attn.q_proj.weight
        A_ = xmod_layer.self_attn.q_proj.bias
        A_ = xmod_layer.self_attn.k_proj.weight
        A_ = xmod_layer.self_attn.k_proj.bias
        A_ = xmod_layer.self_attn.v_proj.weight
        A_ = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        A_ = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
        A_ = xmod_layer.self_attn.out_proj.weight
        A_ = xmod_layer.self_attn.out_proj.bias
        A_ = xmod_layer.self_attn_layer_norm.weight
        A_ = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        A_ = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''' )
        A_ = xmod_layer.fca.weight
        A_ = xmod_layer.fca.bias
        # output
        A_ = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
        A_ = xmod_layer.fca.weight
        A_ = xmod_layer.fca.bias
        A_ = xmod_layer.final_layer_norm.weight
        A_ = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            A_ = xmod_layer.adapter_layer_norm.weight
            A_ = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError('''Lists of language adapters do not match.''' )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            A_ = bert_output.adapter_modules[lang_code]
            A_ = xmod_layer.adapter_modules[lang_code]
            A_ = from_adapter.fca.weight
            A_ = from_adapter.fca.bias
            A_ = from_adapter.fca.weight
            A_ = from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        A_ = xmod_sent_encoder.layer_norm.weight
        A_ = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        A_ = xmod.model.classification_heads['''mnli'''].dense.weight
        A_ = xmod.model.classification_heads['''mnli'''].dense.bias
        A_ = xmod.model.classification_heads['''mnli'''].out_proj.weight
        A_ = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        A_ = xmod.model.encoder.lm_head.dense.weight
        A_ = xmod.model.encoder.lm_head.dense.bias
        A_ = xmod.model.encoder.lm_head.layer_norm.weight
        A_ = xmod.model.encoder.lm_head.layer_norm.bias
        A_ = xmod.model.encoder.lm_head.weight
        A_ = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    A_ = xmod.encode(_UpperCAmelCase ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(_UpperCAmelCase )
    A_ = model(_UpperCAmelCase )[0]
    if classification_head:
        A_ = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_UpperCAmelCase ) )
    else:
        A_ = xmod.model(_UpperCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    A_ = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
    A_ = torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(_UpperCAmelCase ).mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--classification_head', action='store_true', help='Whether to convert a final classification head.'
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 188 | 1 |
"""simple docstring"""
import pprint
import requests
# Base URL of the public zenquotes REST API.
# Fix: the constant was renamed to `__A` by obfuscation, but both functions below
# read `API_ENDPOINT_URL`, which made them raise NameError at call time.
API_ENDPOINT_URL = "https://zenquotes.io/api"
def lowercase_() -> list:
    """Fetch zenquotes' "quote of the day" and return the decoded JSON payload."""
    url = API_ENDPOINT_URL + "/today"
    return requests.get(url).json()
def lowercase_() -> list:
    """Fetch one random quote from zenquotes and return the decoded JSON payload.

    NOTE: this definition shares its (obfuscated) name with the "today" fetcher
    above and therefore shadows it at import time.
    """
    url = API_ENDPOINT_URL + "/random"
    return requests.get(url).json()
if __name__ == "__main__":
    # Fix: the original called undefined `random_quotes()` and printed undefined
    # `response`. Both fetchers were obfuscated to the same name `lowercase_`, so the
    # second definition (the random-quote fetcher) is the one that is callable here.
    response = lowercase_()
    pprint.pprint(response)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _snake_case(unittest.TestCase):
    """Container for the hyper-parameters used to build a `LevitImageProcessor` in tests.

    Fix: in the obfuscated original every ``__init__`` parameter was named
    ``UpperCAmelCase`` — duplicate parameter names are a SyntaxError — so the names
    below are restored from the attribute assignments in the body. The mutable list
    defaults for ``image_mean``/``image_std`` are replaced by ``None`` sentinels
    (same effective defaults, no shared-list pitfall).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Default geometry matches the values asserted by the test class below.
        self.size = size if size is not None else {"shortest_edge": 18}
        self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_center_crop = do_center_crop
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def lowerCamelCase__(self):
        """Return the kwargs dict with which the tests instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class _snake_case ( a__ , unittest.TestCase ):
    # NOTE(review): `a__` is not defined in this module (the imported mixin is named
    # `ImageProcessingSavingTestMixin`), so creating this class raises NameError.
    # NOTE(review): all methods below share the obfuscated name `lowerCamelCase__`,
    # so each definition shadows the previous one and only the last (the PyTorch
    # tensor test) survives on the class. Attribute reads such as
    # `self.image_processing_class`, `self.image_processor_dict`, `image_processor`,
    # `image_inputs`, `encoded_images` and the call to `prepare_image_processor_dict`
    # also no longer match the obfuscated assignment targets (`snake_case__`,
    # `__lowerCamelCase`) — TODO restore the upstream identifiers.
    snake_case__ = LevitImageProcessor if is_vision_available() else None

    # Originally `setUp`: builds the hyper-parameter container used by the tests.
    def lowerCamelCase__ ( self : Any ):
        __lowerCamelCase : Optional[int] = LevitImageProcessingTester(self )

    # Originally the `image_processor_dict` property.
    @property
    def lowerCamelCase__ ( self : Union[str, Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    # Checks that the processor exposes all configuration attributes.
    def lowerCamelCase__ ( self : List[str] ):
        __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "do_center_crop" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "size" ) )

    # Checks `from_dict`, with and without keyword overrides.
    def lowerCamelCase__ ( self : int ):
        __lowerCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        __lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def lowerCamelCase__ ( self : Dict ):
        pass

    # PIL-image inputs: single image and batch must come out with the crop shape.
    def lowerCamelCase__ ( self : Any ):
        # Initialize image_processing
        __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , Image.Image )
        # Test not batched input
        __lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __lowerCamelCase : int = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    # numpy-array inputs: same shape expectations as the PIL case.
    def lowerCamelCase__ ( self : Any ):
        # Initialize image_processing
        __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , np.ndarray )
        # Test not batched input
        __lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __lowerCamelCase : Dict = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    # torch-tensor inputs: same shape expectations as the PIL case.
    def lowerCamelCase__ ( self : Union[str, Any] ):
        # Initialize image_processing
        __lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , torch.Tensor )
        # Test not batched input
        __lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __lowerCamelCase : List[str] = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def a_(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of ``fnc`` over ``[x_start, x_end]``.

    The curve is approximated by ``steps`` straight chords; the sum of the chord
    lengths converges to the true arc length as ``steps`` grows.

    Fix: the obfuscated original named all four parameters ``__snake_case``
    (duplicate parameter names are a SyntaxError); the names are restored from the
    references in the body.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":

    def f(x):
        """Demo curve: f(x) = sin(10 * x)."""
        return math.sin(10 * x)

    # Fix: the original named this nested function `a_` (shadowing the arc-length
    # function above) and then called undefined names `line_length` and `f`.
    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {a_(f, -10, 10, i)}")
        i *= 10
| 676 |
'''simple docstring'''
from collections.abc import Sequence
def a_(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x``; ``poly`` lists coefficients in ascending power.

    Fix: both parameters were obfuscated to ``__snake_case`` (duplicate parameter
    names are a SyntaxError) and the body referenced an undefined ``x``; the names
    are restored.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))
def a_(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x`` using Horner's method (one pass, O(n)).

    ``poly`` lists coefficients in ascending power order. NOTE: this definition
    shares its obfuscated name with the direct evaluator above and shadows it.

    Fix: duplicate ``__snake_case`` parameters (SyntaxError) and the undefined
    ``result``/``x`` references are repaired by restoring the original names.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    # Fix: the original called undefined `evaluate_poly` / `horner`. Both evaluators
    # were obfuscated to the same name `a_`, so only the second (Horner) definition
    # is reachable here; restoring distinct function names would be the full fix.
    print(a_(poly, x))
    print(a_(poly, x))
| 676 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# The conversion below relies on fairseq's 0.12.x X-MOD interface.
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Inputs for the end-to-end sanity check run at the end of the conversion.
# Fix: these constants were all obfuscated to `UpperCamelCase__`, leaving
# `SAMPLE_LANGUAGE` (read inside the converter) undefined.
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def _a(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Convert a fairseq X-MOD checkpoint to a transformers Xmod model and save it.

    Fix: the obfuscated original (a) named all three parameters
    ``SCREAMING_SNAKE_CASE_`` (duplicate parameter names are a SyntaxError) and
    (b) replaced every weight-copy assignment target with a throwaway local
    (``__lowerCAmelCase = ...``), so no weights were actually copied. The targets
    below are reconstructed from the HF Xmod module layout — verify against a real
    checkpoint before shipping.

    Args:
        xmod_checkpoint_path: path to the fairseq checkpoint file.
        pytorch_dump_folder_path: output directory for the converted model.
        classification_head: convert the "mnli" classification head instead of the LM head.
    """
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")
        self_attn.query.weight = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code in xmod_layer.adapter_modules.keys():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            # presumably the HF adapter's layers are named dense1/dense2 — TODO confirm
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


# Backward-compatible public name; the __main__ block below refers to the converter
# by its original (un-obfuscated) name.
convert_xmod_checkpoint_to_pytorch = _a
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
UpperCamelCase__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 552 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
UpperCamelCase__ = """."""
if __name__ == "__main__":
UpperCamelCase__ = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
UpperCamelCase__ = []
UpperCamelCase__ = []
with open(doctest_file_path) as fp:
for line in fp:
UpperCamelCase__ = line.strip()
UpperCamelCase__ = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
UpperCamelCase__ = """\n""".join(non_existent_paths)
raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 552 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Make `utils/check_copies.py` importable from the repo root.
# Fix: the path was assigned to `UpperCAmelCase_`, leaving `git_repo_path` on the
# next line undefined.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"\"\"\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
# Legacy obfuscated alias; the test class below still reads the constant under this name.
UpperCAmelCase_ = REFERENCE_CODE
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Exercises `utils/check_copies.py` against a throw-away copy of the DDPM scheduler.

    Fix: the third method's parameters were all named ``SCREAMING_SNAKE_CASE__``
    (duplicate parameter names are a SyntaxError) and several argument placeholders
    were destroyed; both are reconstructed from the surrounding references.

    NOTE(review): every method still carries the same obfuscated name
    ``SCREAMING_SNAKE_CASE``, so each definition shadows the previous one and the
    in-class calls to ``self.check_copy_consistency`` cannot resolve; restoring the
    upstream names (setUp/tearDown/check_copy_consistency/test_*) is the full fix
    but changes the class interface, so it is left for a follow-up.
    """

    def SCREAMING_SNAKE_CASE(self):  # originally: setUp
        # Work in a temp dir so the copy-consistency checker never touches the repo.
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        repo_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
        shutil.copy(
            os.path.join(repo_root, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def SCREAMING_SNAKE_CASE(self):  # originally: tearDown
        # Point the checker back at the real sources and drop the temp dir.
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def SCREAMING_SNAKE_CASE(self, comment, class_name, class_code, overwrite_result=None):
        # originally: check_copy_consistency
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # presumably PY37 (the repo's minimum supported Python) — the obfuscation
        # reduced the version digits to `PYaa`; TODO confirm against upstream.
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def SCREAMING_SNAKE_CASE(self):  # originally: test_find_code_in_diffusers
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        # `UpperCAmelCase_` holds the reference DDPMSchedulerOutput source (top of file).
        self.assertEqual(code, UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE(self):  # originally: test_is_copy_consistent
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            UpperCAmelCase_ + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            UpperCAmelCase_,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", UpperCAmelCase_),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, UpperCAmelCase_),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            UpperCAmelCase_,
            overwrite_result=re.sub("DDPM", "Test", UpperCAmelCase_),
        )
| 570 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default values for (almost) every common `PretrainedConfig` attribute; the test
# class below iterates this mapping to verify no attribute silently keeps its library
# default.
# Fix: the dict was obfuscated to `SCREAMING_SNAKE_CASE`, but the tests read it as
# `config_common_kwargs`; the original name is restored (with the old name kept as an
# alias for backward compatibility).
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
SCREAMING_SNAKE_CASE = config_common_kwargs  # backward-compatible obfuscated alias
@is_staging_test
class __a ( unittest.TestCase ):
    # NOTE(review): every method below is named `_SCREAMING_SNAKE_CASE`, so each
    # definition shadows the previous one and only the last (the dynamic-config test)
    # survives; the original names (setUpClass/tearDownClass/test_push_to_hub/...)
    # were lost in obfuscation. Assignment targets were likewise flattened to
    # `UpperCamelCase`, so reads such as `config`, `new_config` and `cls._token`
    # no longer match — TODO restore the upstream identifiers.
    # Originally `setUpClass`: store and persist the staging-endpoint auth token.
    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] )-> List[Any]:
        """simple docstring"""
        UpperCamelCase = TOKEN
        HfFolder.save_token(UpperCAmelCase_ )

    # Originally `tearDownClass`: best-effort cleanup of the repos created by the tests.
    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : str )-> Dict:
        """simple docstring"""
        try:
            delete_repo(token=cls._token , repo_id="test-config" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-config" )
        except HTTPError:
            pass

    # Round-trips a BertConfig through the Hub under the user namespace.
    def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> int:
        """simple docstring"""
        UpperCamelCase = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("test-config" , use_auth_token=self._token )
        UpperCamelCase = BertConfig.from_pretrained(f"{USER}/test-config" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-config" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(UpperCAmelCase_ , repo_id="test-config" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
        UpperCamelCase = BertConfig.from_pretrained(f"{USER}/test-config" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )

    # Same round-trip, but under an organization namespace.
    def _SCREAMING_SNAKE_CASE ( self : Tuple )-> List[str]:
        """simple docstring"""
        UpperCamelCase = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
        UpperCamelCase = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                UpperCAmelCase_ , repo_id="valid_org/test-config-org" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
        UpperCamelCase = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )

    # Pushes a config with custom code and reloads it with trust_remote_code.
    def _SCREAMING_SNAKE_CASE ( self : int )-> Union[str, Any]:
        """simple docstring"""
        CustomConfig.register_for_auto_class()
        UpperCamelCase = CustomConfig(attribute=42 )
        config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
        UpperCamelCase = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=UpperCAmelCase_ )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
        self.assertEqual(new_config.attribute , 42 )
class __a ( unittest.TestCase ):
    # NOTE(review): this class reuses the name `__a` and therefore shadows the staging
    # test class above it. As in that class, all methods share the obfuscated name
    # `_SCREAMING_SNAKE_CASE` (only the last survives) and assignment targets were
    # flattened to `UpperCamelCase`, so reads such as `c`, `base_config`,
    # `configuration` and `new_configuration` no longer match — TODO restore the
    # upstream identifiers.
    # Exercises `update_from_string` for int/float/bool/str attributes.
    def _SCREAMING_SNAKE_CASE ( self : Any )-> Any:
        """simple docstring"""
        UpperCamelCase = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        UpperCamelCase = c.n_embd + 1 # int
        UpperCamelCase = c.resid_pdrop + 1.0 # float
        UpperCamelCase = not c.scale_attn_weights # bool
        UpperCamelCase = c.summary_type + "foo" # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
        self.assertEqual(UpperCAmelCase_ , c.n_embd , "mismatch for key: n_embd" )
        self.assertEqual(UpperCAmelCase_ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
        self.assertEqual(UpperCAmelCase_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
        self.assertEqual(UpperCAmelCase_ , c.summary_type , "mismatch for key: summary_type" )

    # Guards that `config_common_kwargs` never holds a library-default value.
    def _SCREAMING_SNAKE_CASE ( self : Any )-> List[str]:
        """simple docstring"""
        UpperCamelCase = PretrainedConfig()
        UpperCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            UpperCAmelCase_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
        UpperCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase_ , UpperCAmelCase_ )]
        if len(UpperCAmelCase_ ) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(UpperCAmelCase_ )}." )

    # Loading from a repo subfolder must require the `subfolder` argument.
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Optional[Any]:
        """simple docstring"""
        with self.assertRaises(UpperCAmelCase_ ):
            # config is in subfolder, the following should not work without specifying the subfolder
            UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
        UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
        self.assertIsNotNone(UpperCAmelCase_ )

    # Verifies the local-cache fallback path when the Hub returns a 500 error.
    def _SCREAMING_SNAKE_CASE ( self : Any )-> Any:
        """simple docstring"""
        # A mock response for an HTTP head request to emulate server down
        UpperCamelCase = mock.Mock()
        UpperCamelCase = 500
        UpperCamelCase = {}
        UpperCamelCase = HTTPError
        UpperCamelCase = {}
        # Download this model to make sure it's in the cache.
        UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=UpperCAmelCase_ ) as mock_head:
            UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()

    # Deprecated: loading a config from a raw URL.
    def _SCREAMING_SNAKE_CASE ( self : Tuple )-> int:
        """simple docstring"""
        # This test is for deprecated behavior and can be removed in v5
        UpperCamelCase = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )

    # Versioned configuration files: the loader must pick the file matching the
    # installed transformers version.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Optional[int]:
        """simple docstring"""
        UpperCamelCase = AutoConfig.from_pretrained("bert-base-cased" )
        UpperCamelCase = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(UpperCAmelCase_ )
            UpperCamelCase = 2
            json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , "w" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            UpperCamelCase = AutoConfig.from_pretrained(UpperCAmelCase_ )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            UpperCamelCase = ["config.42.0.0.json"]
            UpperCamelCase = 768
            configuration.save_pretrained(UpperCAmelCase_ )
            shutil.move(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , os.path.join(UpperCAmelCase_ , "config.42.0.0.json" ) )
            UpperCamelCase = AutoConfig.from_pretrained(UpperCAmelCase_ )
            self.assertEqual(new_configuration.hidden_size , 768 )

    # Repos carrying two configuration files must resolve per requested version.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Dict:
        """simple docstring"""
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        UpperCamelCase = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers
        UpperCamelCase = "v4.0.0"
        UpperCamelCase , UpperCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
            UpperCAmelCase_ , return_unused_kwargs=UpperCAmelCase_ )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(UpperCAmelCase_ , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        UpperCamelCase = "v3.0.0"
        UpperCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase_ )
        self.assertEqual(old_configuration.hidden_size , 768 )
| 554 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
def __init__( self , a_ , a_=1_3 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=9_9 , a_=3_2 , a_=5 , a_=4 , a_=3_7 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_1_2 , a_=1_6 , a_=2 , a_=0.02 , a_=4 , ) -> Union[str, Any]:
lowercase : List[Any] = parent
lowercase : Optional[int] = batch_size
lowercase : Optional[int] = seq_length
lowercase : str = is_training
lowercase : int = use_attention_mask
lowercase : List[str] = use_token_type_ids
lowercase : List[str] = use_labels
lowercase : Dict = vocab_size
lowercase : List[Any] = hidden_size
lowercase : List[str] = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : Optional[int] = intermediate_size
lowercase : int = hidden_act
lowercase : int = hidden_dropout_prob
lowercase : Any = attention_probs_dropout_prob
lowercase : Dict = max_position_embeddings
lowercase : List[str] = type_vocab_size
lowercase : Dict = type_sequence_label_size
lowercase : Optional[int] = initializer_range
lowercase : int = num_choices
def a__ ( self ) -> Union[str, Any]:
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : List[Any] = None
if self.use_attention_mask:
lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Optional[int] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> Dict:
lowercase : List[str] = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Union[str, Any] = config_and_inputs
lowercase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def a__ ( self ) -> int:
lowercase : Optional[Any] = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Any = config_and_inputs
lowercase : Optional[int] = True
lowercase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _UpperCamelCase ( __a , unittest.TestCase):
'''simple docstring'''
_snake_case = True
_snake_case = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> int:
lowercase : int = FlaxBertModelTester(self )
@slow
def a__ ( self ) -> Tuple:
lowercase : str = FlaxBertModel.from_pretrained("bert-base-cased" )
lowercase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase_ )
| 719 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( A ,A ,A ) -> Dict:
def get_masked_lm_array(A ):
lowercase : List[Any] = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase : str = tf.train.load_variable(A ,A )
if "kernel" in name:
lowercase : Optional[Any] = array.transpose()
return torch.from_numpy(A )
def get_encoder_array(A ):
lowercase : List[Any] = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase : str = tf.train.load_variable(A ,A )
if "kernel" in name:
lowercase : List[Any] = array.transpose()
return torch.from_numpy(A )
def get_encoder_layer_array(A ,A ):
lowercase : str = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase : Dict = tf.train.load_variable(A ,A )
if "kernel" in name:
lowercase : Optional[Any] = array.transpose()
return torch.from_numpy(A )
def get_encoder_attention_layer_array(A ,A ,A ):
lowercase : Tuple = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowercase : Dict = tf.train.load_variable(A ,A )
lowercase : Optional[Any] = array.reshape(A )
if "kernel" in name:
lowercase : Any = array.transpose()
return torch.from_numpy(A )
print(F'''Loading model based on config from {config_path}...''' )
lowercase : Union[str, Any] = BertConfig.from_json_file(A )
lowercase : str = BertForMaskedLM(A )
# Layers
for layer_index in range(0 ,config.num_hidden_layers ):
lowercase : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
lowercase : BertSelfAttention = layer.attention.self
lowercase : Optional[int] = get_encoder_attention_layer_array(
A ,"_query_dense/kernel" ,self_attn.query.weight.data.shape )
lowercase : Optional[Any] = get_encoder_attention_layer_array(
A ,"_query_dense/bias" ,self_attn.query.bias.data.shape )
lowercase : Union[str, Any] = get_encoder_attention_layer_array(
A ,"_key_dense/kernel" ,self_attn.key.weight.data.shape )
lowercase : Optional[int] = get_encoder_attention_layer_array(
A ,"_key_dense/bias" ,self_attn.key.bias.data.shape )
lowercase : Any = get_encoder_attention_layer_array(
A ,"_value_dense/kernel" ,self_attn.value.weight.data.shape )
lowercase : Tuple = get_encoder_attention_layer_array(
A ,"_value_dense/bias" ,self_attn.value.bias.data.shape )
# Self-attention Output
lowercase : BertSelfOutput = layer.attention.output
lowercase : List[Any] = get_encoder_attention_layer_array(
A ,"_output_dense/kernel" ,self_output.dense.weight.data.shape )
lowercase : Tuple = get_encoder_attention_layer_array(
A ,"_output_dense/bias" ,self_output.dense.bias.data.shape )
lowercase : Union[str, Any] = get_encoder_layer_array(A ,"_attention_layer_norm/gamma" )
lowercase : Optional[int] = get_encoder_layer_array(A ,"_attention_layer_norm/beta" )
# Intermediate
lowercase : BertIntermediate = layer.intermediate
lowercase : int = get_encoder_layer_array(A ,"_intermediate_dense/kernel" )
lowercase : int = get_encoder_layer_array(A ,"_intermediate_dense/bias" )
# Output
lowercase : BertOutput = layer.output
lowercase : Any = get_encoder_layer_array(A ,"_output_dense/kernel" )
lowercase : Dict = get_encoder_layer_array(A ,"_output_dense/bias" )
lowercase : Optional[int] = get_encoder_layer_array(A ,"_output_layer_norm/gamma" )
lowercase : Tuple = get_encoder_layer_array(A ,"_output_layer_norm/beta" )
# Embeddings
lowercase : Tuple = get_encoder_array("_position_embedding_layer/embeddings" )
lowercase : Optional[int] = get_encoder_array("_type_embedding_layer/embeddings" )
lowercase : Dict = get_encoder_array("_embedding_norm_layer/gamma" )
lowercase : List[Any] = get_encoder_array("_embedding_norm_layer/beta" )
# LM Head
lowercase : List[str] = model.cls.predictions.transform
lowercase : Tuple = get_masked_lm_array("dense/kernel" )
lowercase : List[Any] = get_masked_lm_array("dense/bias" )
lowercase : Optional[Any] = get_masked_lm_array("layer_norm/gamma" )
lowercase : str = get_masked_lm_array("layer_norm/beta" )
lowercase : Any = get_masked_lm_array("embedding_table" )
# Pooling
lowercase : Optional[int] = BertPooler(config=A )
lowercase : BertPooler = get_encoder_array("_pooler_layer/kernel" )
lowercase : BertPooler = get_encoder_array("_pooler_layer/bias" )
# Export final model
model.save_pretrained(A )
# Integration test - should load without any errors ;)
lowercase : Any = BertForMaskedLM.from_pretrained(A )
print(new_model.eval() )
print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
lowerCAmelCase : Dict = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 425 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def a ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ :str = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(__snake_case ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
UpperCAmelCase_ :int = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
UpperCAmelCase_ :Tuple = [[0.0, 0.0], [0.0, 0.0]]
UpperCAmelCase_ :Dict = matrix[1][1], matrix[0][0]
UpperCAmelCase_ :Dict = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(__snake_case ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(__snake_case ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
UpperCAmelCase_ :str = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
UpperCAmelCase_ :List[str] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
UpperCAmelCase_ :Union[str, Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
UpperCAmelCase_ :Optional[int] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
UpperCAmelCase_ :List[str] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
UpperCAmelCase_ :Union[str, Any] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
UpperCAmelCase_ :Optional[Any] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
UpperCAmelCase_ :Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
UpperCAmelCase_ :Dict = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
UpperCAmelCase_ :Tuple = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
UpperCAmelCase_ :Tuple = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
UpperCAmelCase_ :Dict = array(__snake_case )
for i in range(3 ):
for j in range(3 ):
UpperCAmelCase_ :int = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
UpperCAmelCase_ :str = array(__snake_case )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(__snake_case )
# Calculate the inverse of the matrix
return [[float(d(__snake_case ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
| 608 |
"""simple docstring"""
from __future__ import annotations
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
lowercase__ : str= data
lowercase__ : Node | None= None
lowercase__ : Node | None= None
def lowercase__(A ) ->None: # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowercase__(A ) ->int:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowercase__(A ) ->bool:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowercase__() ->None: # Main function for testing.
"""simple docstring"""
lowercase__ : int= Node(1 )
lowercase__ : Union[str, Any]= Node(2 )
lowercase__ : Optional[int]= Node(3 )
lowercase__ : Optional[Any]= Node(4 )
lowercase__ : Optional[Any]= Node(5 )
lowercase__ : Tuple= Node(6 )
lowercase__ : Any= Node(7 )
lowercase__ : Tuple= Node(8 )
lowercase__ : List[Any]= Node(9 )
print(is_full_binary_tree(A ) )
print(depth_of_tree(A ) )
print("Tree is: " )
display(A )
if __name__ == "__main__":
main()
| 218 | 0 |
'''simple docstring'''
_UpperCAmelCase : int = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCAmelCase : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCAmelCase : List[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 474 |
'''simple docstring'''
from PIL import Image
def __magic_name__( lowerCamelCase, lowerCamelCase):
def brightness(lowerCamelCase) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -2_55.0 <= level <= 2_55.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''')
return img.point(lowerCamelCase)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
_UpperCAmelCase : str = change_brightness(img, 1_0_0)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 474 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase : Optional[Any] = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowerCamelCase_ ( _lowerCamelCase : List[str] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase : List[Any] ):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCamelCase_ = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase ) | 142 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : List[str] = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Union[str, Any] = "mctct"
def __init__( self , UpperCamelCase__=8_065 , UpperCamelCase__=1_536 , UpperCamelCase__=36 , UpperCamelCase__=6_144 , UpperCamelCase__=4 , UpperCamelCase__=384 , UpperCamelCase__=920 , UpperCamelCase__=1e-5 , UpperCamelCase__=0.3 , UpperCamelCase__="relu" , UpperCamelCase__=0.02 , UpperCamelCase__=0.3 , UpperCamelCase__=0.3 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=0.3 , UpperCamelCase__=1 , UpperCamelCase__=(7,) , UpperCamelCase__=(3,) , UpperCamelCase__=80 , UpperCamelCase__=1 , UpperCamelCase__=None , UpperCamelCase__="sum" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = attention_head_dim
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = layerdrop
lowerCamelCase_ = hidden_act
lowerCamelCase_ = initializer_range
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = conv_glu_dim
lowerCamelCase_ = conv_dropout
lowerCamelCase_ = num_conv_layers
lowerCamelCase_ = input_feat_per_channel
lowerCamelCase_ = input_channels
lowerCamelCase_ = conv_channels
lowerCamelCase_ = ctc_loss_reduction
lowerCamelCase_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
lowerCamelCase_ = list(UpperCamelCase__ )
lowerCamelCase_ = list(UpperCamelCase__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" ) | 142 | 1 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__a = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
__a = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[int] = calculate_rouge(a_, a_, bootstrap_aggregation=a_, rouge_keys=["rouge2", "rougeL"] )
assert isinstance(a_, a_ )
_UpperCAmelCase : str = calculate_rouge(a_, a_, bootstrap_aggregation=a_, rouge_keys=["rouge2"] )
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def __UpperCAmelCase ( ):
_UpperCAmelCase : int = "rougeLsum"
_UpperCAmelCase : Optional[Any] = calculate_rouge(a_, a_, newline_sep=a_, rouge_keys=[k] )[k]
_UpperCAmelCase : Union[str, Any] = calculate_rouge(a_, a_, newline_sep=a_, rouge_keys=[k] )[k]
assert score > score_no_sep
def __UpperCAmelCase ( ):
_UpperCAmelCase : int = ["rouge1", "rouge2", "rougeL"]
_UpperCAmelCase : List[str] = calculate_rouge(a_, a_, newline_sep=a_, rouge_keys=a_ )
_UpperCAmelCase : str = calculate_rouge(a_, a_, newline_sep=a_, rouge_keys=a_ )
assert score_sep == score_no_sep
def __UpperCAmelCase ( ):
_UpperCAmelCase : int = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
_UpperCAmelCase : Union[str, Any] = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
assert calculate_rouge(a_, a_, newline_sep=a_ ) == calculate_rouge(a_, a_, newline_sep=a_ )
def __UpperCAmelCase ( ):
_UpperCAmelCase : List[Any] = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
_UpperCAmelCase : Optional[int] = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
_UpperCAmelCase : List[Any] = calculate_rouge(a_, a_, rouge_keys=["rougeLsum"], newline_sep=a_ )["rougeLsum"]
_UpperCAmelCase : Union[str, Any] = calculate_rouge(a_, a_, rouge_keys=["rougeLsum"] )["rougeLsum"]
assert new_score > prev_score
def __UpperCAmelCase ( ):
_UpperCAmelCase : Union[str, Any] = Path("examples/seq2seq/test_data/wmt_en_ro" )
_UpperCAmelCase : Tuple = calculate_rouge_path(data_dir.joinpath("test.source" ), data_dir.joinpath("test.target" ) )
assert isinstance(a_, a_ )
_UpperCAmelCase : Optional[int] = calculate_rouge_path(
data_dir.joinpath("test.source" ), data_dir.joinpath("test.target" ), bootstrap_aggregation=a_ )
assert isinstance(a_, a_ ) | 704 | '''simple docstring'''
from __future__ import annotations
import bisect
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int = 0, a_: int = -1 ):
if hi < 0:
_UpperCAmelCase : int = len(a_ )
while lo < hi:
_UpperCAmelCase : Tuple = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
_UpperCAmelCase : str = mid + 1
else:
_UpperCAmelCase : int = mid
return lo
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int = 0, a_: int = -1 ):
if hi < 0:
_UpperCAmelCase : str = len(a_ )
while lo < hi:
_UpperCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
_UpperCAmelCase : Tuple = mid + 1
else:
_UpperCAmelCase : Union[str, Any] = mid
return lo
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int = 0, a_: int = -1 ):
sorted_collection.insert(bisect_left(a_, a_, a_, a_ ), a_ )
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int = 0, a_: int = -1 ):
sorted_collection.insert(bisect_right(a_, a_, a_, a_ ), a_ )
def __UpperCAmelCase ( a_: list[int], a_: int ):
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Tuple = len(a_ ) - 1
while left <= right:
_UpperCAmelCase : List[str] = left + (right - left) // 2
_UpperCAmelCase : List[str] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
_UpperCAmelCase : Optional[int] = midpoint - 1
else:
_UpperCAmelCase : Union[str, Any] = midpoint + 1
return None
def __UpperCAmelCase ( a_: list[int], a_: int ):
_UpperCAmelCase : int = bisect.bisect_left(a_, a_ )
if index != len(a_ ) and sorted_collection[index] == item:
return index
return None
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int, a_: int ):
if right < left:
return None
_UpperCAmelCase : Union[str, Any] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(a_, a_, a_, midpoint - 1 )
else:
return binary_search_by_recursion(a_, a_, midpoint + 1, a_ )
if __name__ == "__main__":
__a = input('Enter numbers separated by comma:\n').strip()
__a = sorted(int(item) for item in user_input.split(','))
__a = int(input('Enter a single number to be found in the list:\n'))
__a = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 257 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
# Language-code token ids in the M2M100 (418M) vocabulary.
# The integration-test class below references these as EN_CODE / FR_CODE
# (see `expected_src_tokens` and the decode test); the previous code bound
# both values to the same mangled name `__snake_case`, so the first value
# was shadowed and EN_CODE / FR_CODE were never defined, raising NameError
# when the test class body was evaluated.
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class A ( lowerCAmelCase_ , unittest.TestCase ):
    """Unit tests for ``MaMaaaTokenizer`` (M2M100) built on a shared tokenizer-test mixin.

    NOTE(review): the four class attributes below are all bound to the same
    name-mangled identifier ``__UpperCAmelCase``; each assignment shadows the
    previous one, so only the last value survives.  Upstream these are four
    distinct attributes (tokenizer class plus boolean capability flags) —
    confirm against the original test file.  Similarly, many locals in the
    methods are assigned to ``_a`` while later lines reference ``lowercase__``
    or other unbound names; this looks like mechanical renaming damage.
    """

    __UpperCAmelCase : Tuple = MaMaaaTokenizer
    __UpperCAmelCase : Any = False
    __UpperCAmelCase : str = False
    __UpperCAmelCase : Tuple = True

    def __lowerCAmelCase ( self ) -> str:
        """Build a tiny vocab + SentencePiece fixture tokenizer under ``self.tmpdirname``.

        NOTE(review): ``lowercase__``, ``save_dir`` and ``tokenizer`` are never
        bound in this scope (the results are all assigned to ``_a``), so this
        raises ``NameError`` as written — verify against the upstream setUp.
        """
        super().setUp()
        # Minimal vocabulary for the fixture tokenizer.
        _a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        _a = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
        _a = Path(self.tmpdirname )
        # Write the vocab json and copy the SentencePiece model next to it.
        save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        _a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def __lowerCAmelCase ( self , **snake_case_ ) -> List[Any]:
        # Reload the fixture tokenizer saved by setUp.
        # NOTE(review): forwards ``**lowercase__`` but the parameter is named
        # ``snake_case_`` — ``lowercase__`` is unbound here; confirm upstream.
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )

    def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
        # (input text, expected round-trip text) pair used by the common mixin tests.
        return (
            "This is a test",
            "This is a test",
        )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # "</s>" is expected to map to id 0 and back.
        _a = "</s>"
        _a = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )

    def __lowerCAmelCase ( self ) -> Tuple:
        # Sanity-check the fixture vocabulary ordering and the reported size.
        _a = self.get_tokenizer()
        _a = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<s>" )
        self.assertEqual(len(lowercase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )

    @unittest.skip("Skip this test while all models are still to be uploaded." )
    def __lowerCAmelCase ( self ) -> Dict:
        pass

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # Full tokenize -> ids -> tokens -> string round trip on the fixture vocab.
        _a = self.get_tokenizer()
        _a = tokenizer.tokenize("This is a test" )
        self.assertListEqual(lowercase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowercase__ ) , [2, 3, 4, 5, 6] , )
        _a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(lowercase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        _a = tokenizer.convert_tokens_to_string(lowercase__ )
        self.assertEqual(lowercase__ , "This is a test" )

    @slow
    def __lowerCAmelCase ( self ) -> str:
        """Integration test: encodings must match the pinned reference for facebook/m2m100_418M."""
        # fmt: off
        _a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A(unittest.TestCase):
    """Integration tests for the M2M100 tokenizer against the real 418M checkpoint.

    NOTE(review): the original class attributes and method names were collapsed to a
    single duplicated identifier, so every attribute/method silently shadowed the
    previous one; restored to the names the test bodies actually reference.
    """

    # Checkpoint and fixtures shared by all tests below.
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        # One tokenizer instance shared by all tests: en -> fr setup.
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        # pad token id of the checkpoint; presumably used by subclasses/helpers — TODO confirm
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        # Setting src_lang must immediately update prefix/suffix special tokens.
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        # Target mode should prefix with the *target* language id; input mode with the source id.
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
| 131 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# All five results below were bound to the same obfuscated name while the later
# lines referenced the real identifiers — restored so the script actually runs.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
# Restrict to the top-level dirs passed on the command line, .py files only.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed by Makefile commands.
print(" ".join(relevant_modified_files), end="")
| 575 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)  # module logger, transformers convention
# Module-level tokenizer resources. These were all bound to one duplicated name
# while the class below reads VOCAB_FILES_NAMES etc. — restored.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    "Fast" MobileBERT tokenizer backed by the HuggingFace *tokenizers* library (WordPiece).

    NOTE(review): the original base class name and all three method names were collapsed
    by obfuscation (every method was literally ``__a``, so only the last survived);
    restored to the canonical ``PreTrainedTokenizerFast`` override names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer if the saved state disagrees with the
        # requested options (lowercasing, accent stripping, Chinese char handling).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` from one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Return token-type ids: 0s for the first segment (incl. CLS/SEP), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files to *save_directory* and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 699 | from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score *item* against *main_target*: one point per position with a matching gene.

    Returns the item together with its score so results can be sorted in bulk.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Single-point crossover: cut both parents at a random index and swap tails.

    The obfuscated version sliced the same parent twice, returning unchanged
    strings; restored the two-parent exchange.
    """
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random position of
    *child* with a random gene; otherwise return it unchanged.

    The obfuscated version assigned the random gene to a throwaway local instead
    of writing it into the list, making the function a no-op — restored the
    index assignment. Empty strings are returned as-is.
    """
    child_list = list(child)
    if child_list and random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed children from *parent_1* and random mates picked among the best scored.

    Restored the original names (duplicate ``A__`` parameters were a SyntaxError)
    and the ``N_SELECTED`` bound on the mate index.
    """
    pop = []
    # Generate more children proportionally to the fitness score, capped at 10.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        # NOTE(review): randint upper bound is inclusive, so this assumes
        # len(population_score) > N_SELECTED — matches the caller's invariant.
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward *target*; return (generation, total_population, best).

    Restored the collapsed local names and the ``lambda x: x[1]`` sort key
    (the obfuscated lambda bound one name but read another — NameError).
    """
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
lowerCAmelCase__ : str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
lowerCAmelCase__ : Optional[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 699 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)  # module logger, transformers convention
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """Build a YolosConfig for the named variant and attach COCO detection labels.

    The obfuscated version assigned every architecture value to a throwaway local
    instead of the config attribute; restored the standard field names
    (attribute mapping follows the YOLOS conversion convention — TODO confirm
    against the checkpoint being converted).
    """
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    # COCO object detection: 91 classes, labels fetched from the hub.
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def a__ ( UpperCamelCase_ : dict, UpperCamelCase_ : YolosConfig, UpperCamelCase_ : bool = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase__ :str = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase__ :Union[str, Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ :Union[str, Any] = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase__ :str = in_proj_bias[: config.hidden_size]
UpperCAmelCase__ :Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase__ :Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase__ :str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase__ :Any = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Translate one timm/YOLOS checkpoint key into the HF YolosForObjectDetection name.

    Replacements are applied sequentially, so order matters (e.g. ``mid_pos_embed``
    must be handled before ``pos_embed``). The obfuscated version assigned each
    replacement to a new local and returned the untouched input; restored the
    rebinding of ``name``.
    """
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    """Rewrite a timm YOLOS state dict into HF naming, splitting fused qkv weights.

    The obfuscated version dropped every destination key (values were assigned to
    throwaway locals); restored the ``vit.encoder.layer...`` targets and the
    ``rename_key`` fallback for non-qkv entries.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            # all_head_size gives the per-projection output dim for this layer.
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats image used to sanity-check model outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Convert an original YOLOS checkpoint to HF format, verify outputs, and save.

    The obfuscated signature repeated one parameter name four times (a SyntaxError);
    restored the four distinct parameters the body's f-strings reference.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCamelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 467 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__magic_name__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__magic_name__ = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__magic_name__ = {
"unc-nlp/lxmert-base-uncased": 512,
}
__magic_name__ = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    "Fast" LXMERT tokenizer backed by the HuggingFace *tokenizers* library (WordPiece).

    NOTE(review): the original base class (``A__``) was undefined and all three
    methods shared one obfuscated name, so only the last survived; restored the
    canonical ``PreTrainedTokenizerFast`` base and override names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer if the saved state disagrees with the
        # requested options (lowercasing, accent stripping, Chinese char handling).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` from one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Return token-type ids: 0s for the first segment (incl. CLS/SEP), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files to *save_directory* and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 254 | 0 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if *matrix* equals its own conjugate transpose.

    The obfuscated version renamed the parameter but the body still read
    ``matrix`` — restored a single consistent name.
    """
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray):
    """Return the Rayleigh quotient v* A v / v* v for matrix *a* and column vector *v*.

    Duplicate obfuscated parameter names were a SyntaxError; restored two
    distinct parameters.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    """Self-checks: a complex and a real Hermitian matrix, Rayleigh quotient = 3 for the latter."""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 719 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
snake_case_ : Optional[int] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round (height, width) up to the nearest multiple of scale_factor**2,
    expressed in latent units times scale_factor.

    Duplicate obfuscated parameter names were a SyntaxError; restored three
    distinct parameters matching the default ``scale_factor=8``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Kandinsky 2.2 ControlNet decoder pipeline (unet + DDPM scheduler + MoVQ).

    NOTE(review): the original base class name (``lowercase``) was undefined;
    restored the imported ``DiffusionPipeline`` base.
    """

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        # VAE (MoVQ) spatial downscaling factor; the obfuscated code dropped the
        # `self.` target, leaving the attribute unset.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
"""simple docstring"""
if latents is None:
UpperCAmelCase: Tuple = randn_tensor(__snake_case , generator=__snake_case , device=__snake_case , dtype=__snake_case )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
UpperCAmelCase: str = latents.to(__snake_case )
UpperCAmelCase: Tuple = latents * scheduler.init_noise_sigma
return latents
def A__ ( self , __snake_case=0 ) -> str:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase: Union[str, Any] = torch.device(F'cuda:{gpu_id}' )
UpperCAmelCase: Optional[int] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__snake_case , __snake_case )
def A__ ( self , __snake_case=0 ) -> List[Any]:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase: Optional[Any] = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase: int = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase: str = cpu_offload_with_hook(__snake_case , __snake_case , prev_module_hook=__snake_case )
# We'll offload the last model manually.
UpperCAmelCase: Union[str, Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A__ ( self ) -> Any:
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__snake_case , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__snake_case )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case = 5_1_2 , __snake_case = 5_1_2 , __snake_case = 1_0_0 , __snake_case = 4.0 , __snake_case = 1 , __snake_case = None , __snake_case = None , __snake_case = "pil" , __snake_case = True , ) -> Dict:
"""simple docstring"""
UpperCAmelCase: Optional[int] = self._execution_device
UpperCAmelCase: Optional[int] = guidance_scale > 1.0
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase: int = torch.cat(__snake_case , dim=0 )
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase: List[Any] = torch.cat(__snake_case , dim=0 )
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase: List[Any] = torch.cat(__snake_case , dim=0 )
UpperCAmelCase: Dict = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
UpperCAmelCase: Dict = image_embeds.repeat_interleave(__snake_case , dim=0 )
UpperCAmelCase: Dict = negative_image_embeds.repeat_interleave(__snake_case , dim=0 )
UpperCAmelCase: Tuple = hint.repeat_interleave(__snake_case , dim=0 )
UpperCAmelCase: Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__snake_case )
UpperCAmelCase: Any = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__snake_case )
self.scheduler.set_timesteps(__snake_case , device=__snake_case )
UpperCAmelCase: Any = self.scheduler.timesteps
UpperCAmelCase: List[str] = self.movq.config.latent_channels
UpperCAmelCase , UpperCAmelCase: Union[str, Any] = downscale_height_and_width(__snake_case , __snake_case , self.movq_scale_factor )
# create initial latent
UpperCAmelCase: Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __snake_case , __snake_case , __snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase: List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase: str = {"image_embeds": image_embeds, "hint": hint}
UpperCAmelCase: Any = self.unet(
sample=__snake_case , timestep=__snake_case , encoder_hidden_states=__snake_case , added_cond_kwargs=__snake_case , return_dict=__snake_case , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase: Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase: str = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase: Dict = variance_pred.chunk(2 )
UpperCAmelCase: List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase: Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase: Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase: int = self.scheduler.step(
__snake_case , __snake_case , __snake_case , generator=__snake_case , )[0]
# post-processing
UpperCAmelCase: Optional[Any] = self.movq.decode(__snake_case , force_not_quantize=__snake_case )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
UpperCAmelCase: Optional[Any] = image * 0.5 + 0.5
UpperCAmelCase: Union[str, Any] = image.clamp(0 , 1 )
UpperCAmelCase: List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase: Dict = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 166 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE() -> tuple[list[int], int]:
    """Build a random benchmark case for the 3-sum solvers.

    :return: ``(arr, target)`` — 10 ints drawn from [-1000, 1000] and a
        target drawn from [-5000, 5000].
    """
    arr = [randint(-1000, 1000) for _ in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)
lowerCAmelCase = make_dataset()
def __SCREAMING_SNAKE_CASE(nums, target) -> tuple[int, ...]:
    """Brute-force 3-sum.

    Tries every ordered triple of distinct elements and returns the first one
    whose sum equals ``target`` (sorted ascending); ``(0, 0, 0)`` when no
    triple matches.  O(n^3) — kept only as the baseline for the benchmark.
    """
    for triplet in permutations(nums, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def __SCREAMING_SNAKE_CASE(nums, target) -> tuple[int, int, int]:
    """Two-pointer 3-sum over a sorted list (O(n^2)).

    Note: sorts ``nums`` in place.  Returns the matching triple in ascending
    order, or ``(0, 0, 0)`` when no triple sums to ``target``.
    """
    nums.sort()
    n = len(nums)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            current = nums[i] + nums[left] + nums[right]
            if current == target:
                return (nums[i], nums[left], nums[right])
            if current < target:
                left += 1  # sum too small: advance the low pointer
            else:
                right -= 1  # sum too large: retreat the high pointer
    return (0, 0, 0)
def __SCREAMING_SNAKE_CASE() -> tuple[float, float]:
    """Benchmark both 3-sum implementations with ``timeit.repeat``.

    Each implementation is run 10000 times per trial, 5 trials; the best
    (minimum) trial time for each is returned as ``(naive, two_pointer)``.

    NOTE(review): the setup snippet imports ``dataset``/``triplet_sum1``/
    ``triplet_sum2`` from ``__main__``; those names must exist there — the
    renamed definitions in this module currently do not match. Confirm.
    """
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    naive_stmt = '''
triplet_sum1(*dataset)
'''
    two_pointer_stmt = '''
triplet_sum2(*dataset)
'''
    naive_times = repeat(setup=setup_code, stmt=naive_stmt, repeat=5, number=10000)
    two_pointer_times = repeat(setup=setup_code, stmt=two_pointer_stmt, repeat=5, number=10000)
    return (min(naive_times), min(two_pointer_times))
if __name__ == "__main__":
    # Run the module's doctests, then time both implementations.
    from doctest import testmod
    testmod()
    # NOTE(review): ``solution_times`` is not defined under that name in this
    # module (the timing helper above was renamed), and ``times`` below is
    # never bound — this block raises NameError as written. Confirm the
    # intended names.
    lowerCAmelCase = solution_times()
    print(F"""The time for naive implementation is {times[0]}.""")
    print(F"""The time for optimized implementation is {times[1]}.""")
| 462 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Fast (tiny-model) pipeline tests for ``StableUnCLIPImgaImgPipeline``.

    NOTE(review): the identifier scrambling broke this class — the four class
    attributes all reuse the name ``_lowerCAmelCase`` (each assignment
    clobbers the previous, so the tester mixins cannot read distinct
    settings), every method is named ``A`` (later defs replace earlier ones
    and unittest discovers none of them), the second ``A``'s signature
    repeats ``lowercase__`` (a SyntaxError), and locals such as
    ``embedder_hidden_size``, ``embedder_projection_dim``, ``input_image``,
    ``inputs`` and ``sd_pipe`` are read but never bound. Code kept
    byte-identical; comments only.
    """
    _lowerCAmelCase : Optional[Any] = StableUnCLIPImgaImgPipeline
    _lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _lowerCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _lowerCAmelCase : Dict = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _lowerCAmelCase : str = frozenset([] )
    def A( self):
        # Builds tiny CLIP / UNet / VAE components for a CPU-sized test run
        # and returns them keyed the way the pipeline constructor expects.
        __UpperCAmelCase : Any = 3_2
        __UpperCAmelCase : Union[str, Any] = embedder_hidden_size
        # image encoding components
        __UpperCAmelCase : Union[str, Any] = CLIPImageProcessor(crop_size=3_2 , size=3_2)
        torch.manual_seed(0)
        __UpperCAmelCase : List[str] = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowercase__ , projection_dim=lowercase__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , ))
        # regular denoising components
        torch.manual_seed(0)
        __UpperCAmelCase : List[Any] = StableUnCLIPImageNormalizer(embedding_dim=lowercase__)
        __UpperCAmelCase : Any = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
        torch.manual_seed(0)
        __UpperCAmelCase : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        torch.manual_seed(0)
        __UpperCAmelCase : int = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ))
        torch.manual_seed(0)
        __UpperCAmelCase : Tuple = UNetaDConditionModel(
            sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
        torch.manual_seed(0)
        __UpperCAmelCase : Optional[Any] = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , )
        torch.manual_seed(0)
        __UpperCAmelCase : Optional[Any] = AutoencoderKL()
        __UpperCAmelCase : Optional[Any] = {
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    def A( self , lowercase__ , lowercase__=0 , lowercase__=True):
        # Builds pipeline call kwargs; converts the random tensor to a PIL
        # image when the pil flag is set.
        if str(lowercase__).startswith('''mps'''):
            __UpperCAmelCase : Union[str, Any] = torch.manual_seed(lowercase__)
        else:
            __UpperCAmelCase : int = torch.Generator(device=lowercase__).manual_seed(lowercase__)
        __UpperCAmelCase : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase__)).to(lowercase__)
        if pil_image:
            # rescale [-1, 1] -> [0, 1], move to HWC numpy, then wrap as PIL
            __UpperCAmelCase : int = input_image * 0.5 + 0.5
            __UpperCAmelCase : str = input_image.clamp(0 , 1)
            __UpperCAmelCase : Optional[int] = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
            __UpperCAmelCase : Union[str, Any] = DiffusionPipeline.numpy_to_pil(lowercase__)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def A( self):
        # Runs the pipeline with image_embeds=None and checks a pixel slice.
        __UpperCAmelCase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __UpperCAmelCase : Dict = self.get_dummy_components()
        __UpperCAmelCase : List[Any] = StableUnCLIPImgaImgPipeline(**lowercase__)
        __UpperCAmelCase : List[str] = sd_pipe.to(lowercase__)
        sd_pipe.set_progress_bar_config(disable=lowercase__)
        __UpperCAmelCase : Dict = self.get_dummy_inputs(lowercase__)
        inputs.update({'''image_embeds''': None})
        __UpperCAmelCase : Any = sd_pipe(**lowercase__).images
        __UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        __UpperCAmelCase : Union[str, Any] = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def A( self):
        # Attention-slicing equivalence; exact comparison only on cpu/mps.
        __UpperCAmelCase : Optional[Any] = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=lowercase__)
    def A( self):
        # Batched vs single inference equivalence; exact only on cpu/mps.
        __UpperCAmelCase : str = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=lowercase__)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def A( self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowercase__)
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    """Slow GPU integration tests for StableUnCLIP img2img.

    NOTE(review): rename breakage — every method is named ``A`` (later
    definitions replace earlier ones; the tearDown logic is never invoked by
    unittest and no ``test_*`` method is discovered), and the bodies read
    names (``pipe``, ``init_image``, ``expected_image``, ``output``,
    ``image``, ``mem_bytes``) that are only ever assigned to
    ``__UpperCAmelCase``. Code kept byte-identical; comments only.
    """
    def A( self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def A( self):
        # l-variant checkpoint: compare generated image to a reference numpy file.
        __UpperCAmelCase : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        __UpperCAmelCase : List[str] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')
        __UpperCAmelCase : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa)
        pipe.to(lowercase__)
        pipe.set_progress_bar_config(disable=lowercase__)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCAmelCase : Optional[Any] = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCAmelCase : Any = pipe(lowercase__ , '''anime turle''' , generator=lowercase__ , output_type='''np''')
        __UpperCAmelCase : Optional[Any] = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(lowercase__ , lowercase__)
    def A( self):
        # h-variant checkpoint: same reference-image comparison.
        __UpperCAmelCase : str = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        __UpperCAmelCase : Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')
        __UpperCAmelCase : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
        pipe.to(lowercase__)
        pipe.set_progress_bar_config(disable=lowercase__)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCAmelCase : Dict = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCAmelCase : int = pipe(lowercase__ , '''anime turle''' , generator=lowercase__ , output_type='''np''')
        __UpperCAmelCase : List[str] = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(lowercase__ , lowercase__)
    def A( self):
        # Memory budget check: with slicing + sequential offload the run must
        # stay under 7 GB of peak allocated CUDA memory.
        __UpperCAmelCase : str = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __UpperCAmelCase : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
        __UpperCAmelCase : Optional[int] = pipe.to(lowercase__)
        pipe.set_progress_bar_config(disable=lowercase__)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCAmelCase : int = pipe(
            lowercase__ , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
        __UpperCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 1_0**9
| 462 | 1 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class snake_case__ ( unittest.TestCase ):
    """Fast tests for the KarrasVe pipeline on a tiny unconditional UNet.

    NOTE(review): rename breakage — both methods share the name
    ``UpperCamelCase`` (the second definition replaces the property), and the
    bodies return/consume names that are never bound (``model``, ``pipe``,
    ``image``, ``lowerCAmelCase_`` …) because every result is assigned to
    ``UpperCAmelCase_``. Code kept byte-identical; comments only.
    """
    @property
    def UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Tiny unconditional UNet used as the dummy model."""
        torch.manual_seed(0 )
        UpperCAmelCase_ = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model
    def UpperCamelCase ( self : int ) -> str:
        """Run inference via dict and tuple returns and compare pixel slices."""
        UpperCAmelCase_ = self.dummy_uncond_unet
        UpperCAmelCase_ = KarrasVeScheduler()
        UpperCAmelCase_ = KarrasVePipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(num_inference_steps=2 , generator=lowerCAmelCase_ , output_type='''numpy''' ).images
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(num_inference_steps=2 , generator=lowerCAmelCase_ , output_type='''numpy''' , return_dict=lowerCAmelCase_ )[0]
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class snake_case__ ( unittest.TestCase ):
    """Slow KarrasVe integration test with ``google/ncsnpp-celebahq-256``.

    NOTE(review): same rename breakage as the fast class — results are bound
    to ``UpperCAmelCase_`` while the body reads ``lowerCAmelCase_``/``pipe``/
    ``image``/``image_slice``/``expected_slice``, which are never defined.
    Code kept byte-identical; comments only.
    """
    def UpperCamelCase ( self : Optional[int] ) -> List[Any]:
        """Generate a 256x256 sample and compare a corner slice to references."""
        UpperCAmelCase_ = '''google/ncsnpp-celebahq-256'''
        UpperCAmelCase_ = UNetaDModel.from_pretrained(lowerCAmelCase_ )
        UpperCAmelCase_ = KarrasVeScheduler()
        UpperCAmelCase_ = KarrasVePipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(num_inference_steps=20 , generator=lowerCAmelCase_ , output_type='''numpy''' ).images
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        UpperCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 707 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _lowerCAmelCase(repo_id: str, path: str, revision: Optional[str] = None):
    """Build a Hugging Face Hub URL for ``path`` inside dataset ``repo_id``.

    The original signature repeated one parameter name three times (a
    SyntaxError); the roles are restored here as (repo_id, path, revision).

    :param repo_id: ``namespace/name`` of the dataset repository
    :param path: file path within the repository
    :param revision: optional git revision (branch, tag or commit)
    :return: the resolved URL string from ``hfh.hf_hub_url``
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 407 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# CI hub credentials and endpoints used by the fixtures below.
# NOTE(review): every constant is bound to the same name ``lowercase`` (each
# assignment clobbers the previous one), so the fixtures cannot reference the
# individual values; the ``CI_HUB_ENDPOINT`` read in the URL templates is also
# undefined as written. The '(unknown)' segment in the second template looks
# like a lost '{path}' placeholder — confirm against the original conftest.
lowercase : Dict = '__DUMMY_TRANSFORMERS_USER__'
lowercase : Union[str, Any] = 'Dummy User'
lowercase : List[Any] = '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'
lowercase : Dict = 'https://hub-ci.huggingface.co'
lowercase : int = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
lowercase : Tuple = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/(unknown)'
lowercase : Tuple = Path('~/.huggingface/hub_ci_token').expanduser()
# NOTE(review): systemic rename breakage in the fixtures below — several
# signatures repeat the parameter name ``_lowerCamelCase`` (a SyntaxError),
# and the bodies read ``monkeypatch``/``SCREAMING_SNAKE_CASE__``/
# ``previous_token``/``hf_api``/``cleanup_repo``/``repo_id``, none of which
# the renamed parameters bind. Code kept byte-identical; only docstrings and
# comments were added.
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Optional[int]:
    '''Presumably patches huggingface_hub's file-download URL template to point
    at the CI hub — confirm the intended constant.'''
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , SCREAMING_SNAKE_CASE__)
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> str:
    '''Presumably points datasets at the CI hub endpoint/URL — confirm.'''
    monkeypatch.setattr("datasets.config.HF_ENDPOINT" , SCREAMING_SNAKE_CASE__)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , SCREAMING_SNAKE_CASE__)
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any) -> List[str]:
    '''Presumably redirects HfFolder's token file to the CI token path — confirm.'''
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , SCREAMING_SNAKE_CASE__)
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple) -> Dict:
    '''Presumably saves the CI token for the test's duration, deleting it after.'''
    HfFolder.save_token(SCREAMING_SNAKE_CASE__)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
    '''Session-scoped HfApi client bound to the CI endpoint — endpoint name lost.'''
    return HfApi(endpoint=SCREAMING_SNAKE_CASE__)
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Union[str, Any]:
    '''Presumably swaps in the CI user token, restoring the previous one afterwards.'''
    __UpperCamelCase : List[Any] = HfFolder.get_token()
    HfFolder.save_token(SCREAMING_SNAKE_CASE__)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(SCREAMING_SNAKE_CASE__)
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> Union[str, Any]:
    '''Factory fixture: returns a callable that deletes a dataset repo.'''
    def _cleanup_repo(_lowerCamelCase : Any):
        hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset")
    return _cleanup_repo
@pytest.fixture
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> str:
    '''Factory fixture: context manager yielding a repo id and cleaning it up.'''
    @contextmanager
    def _temporary_repo(_lowerCamelCase : Union[str, Any]):
        try:
            yield repo_id
        finally:
            cleanup_repo(SCREAMING_SNAKE_CASE__)
    return _temporary_repo
# NOTE(review): as above — the signatures repeat ``_lowerCamelCase``
# (SyntaxError) and the bodies read ``hf_api``/``repo_name``/``repo_id``/
# the ``hf_private_dataset_repo_*`` names that the renames no longer bind.
# Code kept byte-identical; only docstrings and comments were added.
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int]) -> int:
    '''Session fixture: creates a private dataset repo with one text file,
    yields its id, and best-effort deletes it afterwards.'''
    __UpperCamelCase : Any = F'repo_txt_data-{int(time.time() * 10e3)}'
    __UpperCamelCase : str = F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset" , private=SCREAMING_SNAKE_CASE__)
    hf_api.upload_file(
        token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__) , path_in_repo="data/text_data.txt" , repo_id=SCREAMING_SNAKE_CASE__ , repo_type="dataset" , )
    yield repo_id
    try:
        hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]) -> str:
    '''Pass-through alias for the session-scoped text-data repo fixture.'''
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int]) -> Optional[Any]:
    '''Session fixture: private dataset repo containing a zipped text archive.'''
    __UpperCamelCase : str = F'repo_zipped_txt_data-{int(time.time() * 10e3)}'
    __UpperCamelCase : str = F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset" , private=SCREAMING_SNAKE_CASE__)
    hf_api.upload_file(
        token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__) , path_in_repo="data.zip" , repo_id=SCREAMING_SNAKE_CASE__ , repo_type="dataset" , )
    yield repo_id
    try:
        hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple) -> Dict:
    '''Pass-through alias for the zipped-text repo fixture.'''
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : int) -> List[str]:
    '''Session fixture: private dataset repo containing a zipped image archive.'''
    __UpperCamelCase : Any = F'repo_zipped_img_data-{int(time.time() * 10e3)}'
    __UpperCamelCase : Optional[int] = F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset" , private=SCREAMING_SNAKE_CASE__)
    hf_api.upload_file(
        token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__) , path_in_repo="data.zip" , repo_id=SCREAMING_SNAKE_CASE__ , repo_type="dataset" , )
    yield repo_id
    try:
        hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : int) -> Optional[Any]:
    '''Pass-through alias for the zipped-image repo fixture.'''
    return hf_private_dataset_repo_zipped_img_data_
import numpy as np
class A_ :
    """A node in the A* grid search: a position, a back-pointer to the parent
    cell, and the path costs g (from start), h (heuristic to goal) and
    f = g + h.

    Fix: the original ``__init__`` bound these values to a throwaway local
    instead of instance attributes, so ``__eq__`` and the search (which read
    ``position``/``parent``/``g``/``h``/``f``) failed with AttributeError.
    """

    def __init__(self):
        self.position = (0, 0)  # (x, y) grid coordinates
        self.parent = None  # previous cell on the reconstructed path
        self.g = 0  # cost accumulated from the start cell
        self.h = 0  # heuristic estimate to the goal cell
        self.f = 0  # selection key used by the search: g + h

    def __eq__(self, a):
        """Cells compare equal when they occupy the same grid position."""
        return self.position == a.position

    def _snake_case(self):
        """Debug helper: print this cell's position."""
        print(self.position)
class A_ :
    """A rectangular grid world backed by a numpy matrix.

    ``w`` holds the grid itself (the demo script marks the found path in it);
    ``world_x_limit``/``world_y_limit`` are the exclusive coordinate bounds.

    Fixes: ``__init__`` bound its values to a throwaway local instead of
    instance attributes, and both methods shared one name (the second
    silently replaced the first).  The neighbour method is renamed
    ``get_neigbours`` to match the call in the A* routine below.
    """

    def __init__(self, a=(5, 5)):
        self.w = np.zeros(a)
        self.world_x_limit = a[0]
        self.world_y_limit = a[1]

    def _snake_case(self):
        """Debug helper: print the grid matrix."""
        print(self.w)

    def get_neigbours(self, a):
        """Return the in-bounds cells (up to 8, diagonals included) around ``a``.

        Each neighbour is a fresh ``Cell`` whose ``parent`` is ``a``.
        NOTE(review): ``Cell`` must resolve to the node class defined above —
        that class lost the name in a rename; confirm.
        """
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = a.position[0]
        current_y = a.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = a
                neighbours.append(c)
        return neighbours
def UpperCamelCase__(world, start, goal):
    """A* search over ``world`` from ``start`` to ``goal``.

    ``world`` must provide ``get_neigbours(cell)``; cells carry ``position``,
    ``parent`` and the costs ``g``/``h``/``f``.

    Fixes: the original signature repeated one parameter name three times (a
    SyntaxError), the open/closed lists and the reconstructed path were bound
    to a throwaway local, and the closed/open membership loops were no-ops
    (``continue`` only affected the inner ``for``); they now actually skip.

    :return: list of positions from start to goal, inclusive.
    """
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        # Pop the open cell with the smallest f = g + h.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # Skip cells that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            # Squared-Euclidean heuristic, as in the original demo.
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # Skip if a cheaper copy of this cell is already queued.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    # Walk the parent chain back from the goal and reverse it.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    # Demo: run A* on a 5x5 grid from (0, 0) to (4, 4) and print the marked grid.
    # NOTE(review): ``Gridworld``, ``Cell`` and ``astar`` are not defined under
    # these names in this module (the classes/function above were renamed), and
    # ``start``/``goal``/``world``/``s`` are never bound because every result
    # is assigned to ``lowercase_`` — this block raises NameError as written.
    lowercase_ = Gridworld()
    # Start position and goal
    lowercase_ = Cell()
    lowercase_ = (0, 0)
    lowercase_ = Cell()
    lowercase_ = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    lowercase_ = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        lowercase_ = 1
    print(world.w)
| 669 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCamelCase ( monkeypatch ):
    """Clear datasets' once-per-process deprecation-warning registry so each
    test observes the deprecation warning afresh.

    Fix: pytest injects fixtures by parameter name, and the body already uses
    ``monkeypatch`` — the scrambled parameter name left it unbound (NameError
    / unknown fixture), so the parameter is restored to ``monkeypatch``.
    """
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    '''Fixture that was meant to patch ``datasets.inspect.huggingface_hub``
    with a mock hub module — see the NOTE below about its broken layout.'''
# NOTE(review): the two classes below were presumably nested inside the fixture
# above; de-indented, they terminate the fixture's body after its docstring,
# and the trailing ``monkeypatch.setattr`` call sits at class scope where
# ``monkeypatch`` and ``HfhMock`` are unbound. ``metric_id``, ``MetricMock``
# and ``UpperCAmelCase_`` are also undefined as written — restore the original
# nesting and names before relying on this fixture.
class lowerCamelCase_ :
    '''Mock of a single hub metric entry (stores only its id).'''
    def __init__( self , __lowercase) -> Dict:
        __UpperCamelCase :Optional[int] = metric_id
class lowerCamelCase_ :
    '''Mock of the ``huggingface_hub`` module exposing ``list_metrics``.'''
    a__ : List[str] = [MetricMock(UpperCAmelCase_ ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        return self._metrics
    monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
    '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''Check each metric entry point emits the "use evaluate instead"
    deprecation warning.

    NOTE(review): the signature repeats ``SCREAMING_SNAKE_CASE`` five times —
    a SyntaxError — and the body reads ``args``/``tmp_path``/``func``, which
    the renamed parameters no longer bind; the original parameters were
    presumably ``func, args`` (from parametrize) plus fixtures. Confirm
    before use.'''
    if "tmp_path" in args:
        __UpperCamelCase :Any = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(SCREAMING_SNAKE_CASE , match='''https://huggingface.co/docs/evaluate''' ):
        func(*SCREAMING_SNAKE_CASE )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
# Lazy-import scaffolding for the Audio Spectrogram Transformer (AST) package:
# the import structure is declared up front and only materialized on access.
# NOTE(review): the mapping and both lists below are all bound to the same
# name ``__lowercase`` (each assignment clobbers the previous), and the final
# ``_LazyModule`` call reads ``_import_structure``, which is never defined —
# the import-structure dict lost its name in a rename; confirm.
__lowercase = {
    '''configuration_audio_spectrogram_transformer''': [
        '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''ASTConfig''',
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: the modeling classes should be exposed as well.
    __lowercase = [
        '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ASTForAudioClassification''',
        '''ASTModel''',
        '''ASTPreTrainedModel''',
    ]
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # speech extras present: the feature extractor should be exposed.
    __lowercase = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )
    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
    import sys
    # At runtime the module object is replaced by a lazy proxy.
    __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 452 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowercase ( unittest.TestCase ):
    """Slow Flax Stable Diffusion v2 integration tests (bf16, sharded across devices).

    NOTE(review): rename breakage — results are repeatedly bound to the single
    name ``lowercase`` (including the ``a, a = ...`` tuple unpacks), while the
    bodies read ``sd_pipe``/``params``/``prompt``/``prompt_ids``/``rng``/
    ``images``/``model_id``/``scheduler``/``scheduler_params``, which are
    never defined. Code kept byte-identical; comments only.
    """
    def __a ( self : Dict ) -> Union[str, Any]:
        '''Free host memory between tests.'''
        super().tearDown()
        gc.collect()
    def __a ( self : Optional[int] ) -> List[str]:
        '''Text-to-image with the default scheduler; checks a pixel slice.'''
        lowercase ,lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
        lowercase = '''A painting of a squirrel eating a burger'''
        lowercase = jax.device_count()
        lowercase = num_samples * [prompt]
        lowercase = sd_pipe.prepare_inputs(__lowerCamelCase )
        lowercase = replicate(__lowerCamelCase )
        lowercase = shard(__lowerCamelCase )
        lowercase = jax.random.PRNGKey(0 )
        lowercase = jax.random.split(__lowerCamelCase , jax.device_count() )
        lowercase = sd_pipe(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , num_inference_steps=25 , jit=__lowerCamelCase )[0]
        assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
        lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowercase = images[0, 2_53:2_56, 2_53:2_56, -1]
        lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowercase = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def __a ( self : Tuple ) -> List[Any]:
        '''Text-to-image with the DPM-Solver multistep scheduler; checks a pixel slice.'''
        lowercase = '''stabilityai/stable-diffusion-2'''
        lowercase ,lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(__lowerCamelCase , subfolder='''scheduler''' )
        lowercase ,lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            __lowerCamelCase , scheduler=__lowerCamelCase , revision='''bf16''' , dtype=jnp.bfloataa , )
        lowercase = scheduler_params
        lowercase = '''A painting of a squirrel eating a burger'''
        lowercase = jax.device_count()
        lowercase = num_samples * [prompt]
        lowercase = sd_pipe.prepare_inputs(__lowerCamelCase )
        lowercase = replicate(__lowerCamelCase )
        lowercase = shard(__lowerCamelCase )
        lowercase = jax.random.PRNGKey(0 )
        lowercase = jax.random.split(__lowerCamelCase , jax.device_count() )
        lowercase = sd_pipe(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , num_inference_steps=25 , jit=__lowerCamelCase )[0]
        assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
        lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowercase = images[0, 2_53:2_56, 2_53:2_56, -1]
        lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowercase = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
import numpy as np

# NOTE(review): original line carried stray "| 604 |" residue (SyntaxError)
# and imported the nonexistent names "cva"/"uinta" (OpenCV is "cv2",
# NumPy's 8-bit unsigned type is "uint8").
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures for the tests below: the Lena sample image (BGR, as read by
# OpenCV) and its grayscale conversion. Both assignments were bound to the
# same name "A_" while later code reads "img"/"gray".
img = imread(R"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative() -> None:
    """convert_to_negative must produce at least one non-zero pixel."""
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast() -> None:
    """change_contrast should return a PIL RGB image of the input size."""
    with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '''<PIL.Image.Image image mode=RGB size=100x100 at''')
def test_gen_gaussian_kernel() -> None:
    """A 9x9 Gaussian kernel must contain no zero entries."""
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny() -> None:
    """Canny edge detection must mark at least one edge pixel."""
    canny_img = imread('''digital_image_processing/image_data/lena_small.jpg''', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter() -> None:
    """Gaussian filtering the gray fixture must leave no zero pixels."""
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter() -> None:
    """Convolving with a Laplace-like kernel must yield a non-zero result."""
    # Laplace diagonal kernel.
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter() -> None:
    """3x3 median filtering must produce a non-zero image."""
    assert med.median_filter(gray, 3).any()
def test_sobel_filter() -> None:
    """Sobel must return non-trivial gradient magnitudes and directions."""
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia() -> None:
    """Sepia toning must leave no zero pixels."""
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    """Burkes dithering must produce a non-empty output image."""
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
) -> None:
    """Nearest-neighbour resize to 400x200 must produce a non-empty output."""
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern() -> None:
    """Exercise get_neighbors_pixel and the full LBP image computation."""
    file_path = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center_pixel = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center_pixel)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array with the same height and width as the read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Compute the local binary pattern value for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
# --- file boundary (stray table residue removed) ---
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> "qiskit.result.counts.Counts":
    """Build and simulate a quantum full adder.

    Each input may be 0, 1, or 2: 0/1 set the qubit classically, while 2 puts
    it into superposition via a Hadamard gate. Returns the measurement counts
    of the (sum, carry-out) qubits over 1000 shots.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, not an exact integer, or > 2.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
# Demo entry point: simulate the adder with inputs (1, 1, 1) and print the
# measurement counts.
if __name__ == "__main__":
    print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
# --- file boundary (stray table residue removed) ---
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCAmelCase(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of an embedding distribution and
    standardizes / de-standardizes embeddings with them.

    NOTE(review): the original bases were the undefined name
    ``UpperCAmelCase__`` and all three methods shared one name, so only the
    last survived; restored to ``to`` / ``scale`` / ``unscale``.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()
        # Statistics of the embedding distribution; initialised to N(0, 1).
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        # Re-wrap as Parameters so the moved tensors stay registered.
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds: torch.Tensor) -> torch.Tensor:
        """Standardize: (embeds - mean) / std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds: torch.Tensor) -> torch.Tensor:
        """Invert ``scale``: embeds * std + mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
# --- file boundary (stray table residue removed) ---
"""simple docstring"""
import operator as op
_lowercase = '''scaler.pt'''
_lowercase = '''pytorch_model'''
_lowercase = '''random_states'''
_lowercase = '''optimizer'''
_lowercase = '''scheduler'''
_lowercase = '''pytorch_model.bin'''
_lowercase = '''pytorch_model.bin.index.json'''
_lowercase = '''model.safetensors'''
_lowercase = '''model.safetensors.index.json'''
_lowercase = '''1.10.2'''
_lowercase = '''py38'''
_lowercase = '''4.17.0'''
_lowercase = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
_lowercase = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
_lowercase = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
_lowercase = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
_lowercase = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
_lowercase = '''2.0.1'''
_lowercase = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
_lowercase = ['''default''', '''reduce-overhead''', '''max-autotune''']
_lowercase = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_lowercase = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
_lowercase = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
_lowercase = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
# --- file boundary (stray table residue removed) ---
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Load the raw series; every variable below was bound to "_lowercase" in
    # the original, breaking all later references — names restored.
    df = pd.read_csv('''sample_data.csv''', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10     # input window length
    forward_days = 5   # prediction horizon
    periods = 20       # number of test windows held out

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Build sliding (window, horizon) pairs for train and test splits.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    # Two stacked LSTM layers followed by a dense multi-step output head.
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='''mean_squared_error''', optimizer='''adam''')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
# --- file boundary (stray table residue removed) ---
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _A(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for DeBERTa (Python and Rust implementations).

    NOTE(review): the original base class was the undefined name
    ``_lowerCamelCase`` and every method was named ``__a`` (so only the last
    survived); restored to the conventional TokenizerTesterMixin layout.
    """

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''[UNK]''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''', '''World''')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''')

        # NOTE(review): the original boolean flags were lost in transit; the
        # conventional values below should be confirmed against the caller.
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''', '''multi-sequence build''', add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''')

            sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['''input_ids''']]

            # fmt: off
            expected_encoding = {
                '''input_ids''': [
                    [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
                ],
                '''token_type_ids''': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                '''attention_mask''': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(decoded, expected)
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return the grayscale image of an RGB image via the luma transform."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Binarize a gray image: True for pixel values in (127, 255]."""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image by a kernel.

    A pixel is set when any kernel element overlaps a set pixel of the
    (zero-padded) image.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image. NOTE(review): the assignment target was
    # lost in transit; this centering matches the padding sizes above.
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Resize a PIL image down to a multiple of 32 and return it as a
    (1, C, H, W) float tensor scaled to [-1, 1].

    NOTE(review): defined as ``lowercase`` in the original although the
    pipeline below calls ``preprocess`` — name restored.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["""lanczos"""])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class SCREAMING_SNAKE_CASE__(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE decoder + U-Net).

    NOTE(review): the original signatures reused one parameter name
    (``lowerCAmelCase``) for every argument, which is a SyntaxError; the
    names below are reconstructed from how the body references them.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}''')

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["""eta"""] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
# --- file boundary (stray table residue removed) ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Both module constants were bound to the same name "UpperCamelCase" in the
# original, so the logger was clobbered by the archive map; names restored.
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __lowerCamelCase(PretrainedConfig):
    """Configuration class storing the architecture hyper-parameters of a
    ViT-MSN model.

    NOTE(review): the original base was the undefined name
    ``UpperCamelCase__`` and the ``__init__`` signature reused one parameter
    name for every argument (a SyntaxError); names reconstructed from the
    attribute assignments in the body.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the DanceDiffusion pipeline.

    NOTE(review): originally named ``_UpperCAmelCase`` — identical to the
    integration class below, which shadowed this one — with the undefined
    base ``A_`` and all methods sharing one name; restored.
    """

    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        '''callback''',
        '''latents''',
        '''callback_steps''',
        '''output_type''',
        '''num_images_per_prompt''',
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type='''fourier''',
            mid_block_type='''UNetMidBlock1D''',
            down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D'''),
            up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip'''),
        )
        scheduler = IPNDMScheduler()
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators.
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """batch_size""": 1,
            """generator""": generator,
            """num_inference_steps""": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class DanceDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released harmonai/maestro-150k
    weights (full precision and fp16)."""

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2

    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''', torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2
# --- file boundary (stray table residue removed) ---
"""simple docstring"""
def is_palindrome(head) -> bool:
    """Check whether a singly linked list is a palindrome in O(1) extra space.

    Finds the midpoint with fast/slow pointers, reverses the second half in
    place, then walks both halves comparing values.
    NOTE: the input list is left modified (second half stays reversed).
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def lowercase(head) -> bool:
    """Return True if the linked list `head` is a palindrome, using a stack.

    Pushes the values from the midpoint to the tail onto a list, then pops
    them while walking forward from the head. O(n) time, O(n/2) extra space.
    """
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def lowercase(head) -> bool:
    """Return True if the linked list `head` is a palindrome, using a dict.

    Records every position of each value; in a palindrome the positions of a
    value pair up symmetrically (i-th from the front plus i-th from the back
    equals the last index), and at most one value may occur an odd number of
    times (the middle of an odd-length list).
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1  # index of the last node
    middle = 0  # count of values occurring an odd number of times
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
| 229 | 0 |
def check_bouncy(n: int) -> bool:
    """Return True if `n` is a bouncy number: its digits are neither entirely
    non-decreasing nor entirely non-increasing (Project Euler 112).

    :raises ValueError: if `n` is not an integer.
    """
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    # Bouncy iff the digit string is neither fully ascending nor descending.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


a_ = check_bouncy  # keep the former binding for any external callers
def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    (among 1..num) first reaches `percent` percent (Project Euler 112).

    :raises ValueError: if `percent` is not strictly between 0 and 100.
    """
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100')
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


a_ = solution  # keep the former binding for any external callers
if __name__ == "__main__":
    # Run the module's doctests, then print the Project Euler 112 answer for
    # the default 99% bouncy-number proportion.
    from doctest import testmod
    testmod()
    print(F"""{solution(99)}""")
| 53 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (ParlAI name fragment, Transformers name fragment) substitutions applied to
# every checkpoint key by rename_state_dict_key(), in order.
PATTERNS = [
    ['attention', 'attn'],
    ['encoder_attention', 'encoder_attn'],
    ['q_lin', 'q_proj'],
    ['k_lin', 'k_proj'],
    ['v_lin', 'v_proj'],
    ['out_lin', 'out_proj'],
    ['norm_embeddings', 'layernorm_embedding'],
    ['position_embeddings', 'embed_positions'],
    ['embeddings', 'embed_tokens'],
    ['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    """Map a ParlAI Blenderbot state-dict key `k` onto its Transformers name."""
    if k == "embeddings.weight":
        return "shared.weight"

    # Generic fragment substitutions first.
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    # Layer-norm names differ between encoder and decoder.
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


a_ = rename_state_dict_key  # keep the former binding for any external callers
def rename_layernorm_keys(sd):
    """Rename (in place) the four `layernorm_embedding` entries of the state
    dict `sd` to `layer_norm`, as used by Blenderbot-3B checkpoints."""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd  # never silently overwrite an existing entry
        sd[new_k] = v


a_ = rename_layernorm_keys  # keep the former binding for any external callers
# Keys present in the ParlAI checkpoint that have no Transformers counterpart.
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint into a Transformers checkpoint.

    :param checkpoint_path: path to the ParlAI `blenderbot-model.bin` file
    :param pytorch_dump_folder_path: directory where the converted model is saved
    :param config_json_path: path to the BlenderbotConfig json to instantiate
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        # NOTE(review): applied to `sd` here; confirm against the upstream
        # conversion script whether `mapping` was intended instead.
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


a_ = convert_parlai_checkpoint  # keep the former binding for any external callers
if __name__ == "__main__":
    # CLI: convert a ParlAI Blenderbot checkpoint into a Transformers one.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
    parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
    parser.add_argument(
        '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 452 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 171 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class SCREAMING_SNAKE_CASE_(pl.LightningModule):
    """Minimal LightningModule wrapper used only to load the fine-tuned
    Longformer QA checkpoint (base model + a 2-label QA head)."""

    def __init__(self, model):
        super().__init__()
        self.model = model  # the base LongformerModel
        self.num_labels = 2  # start/end logits
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Implemented only because Lightning requires it; never called here.
        pass


# The conversion function in this file refers to this class as LightningModel.
LightningModel = SCREAMING_SNAKE_CASE_
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    """Convert a PyTorch-Lightning Longformer QA checkpoint into a
    Transformers LongformerForQuestionAnswering checkpoint.

    :param longformer_model: model identifier of the base Longformer
    :param longformer_question_answering_ckpt_path: Lightning .ckpt path
    :param pytorch_dump_folder_path: output directory for the converted model
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')


__lowerCamelCase = convert_longformer_qa_checkpoint_to_pytorch  # former binding
if __name__ == "__main__":
    # CLI: convert a Lightning Longformer QA checkpoint into a Transformers one.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--longformer_model',
        default=None,
        type=str,
        required=True,
        help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
    )
    parser.add_argument(
        '--longformer_question_answering_ckpt_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch Lightning Checkpoint.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 171 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCamelCase_(BaseOutput):
    """Output of VQModel.encode().

    Attributes:
        latents: the encoded (pre-quantization) latent tensor.
    """

    latents: torch.FloatTensor


# The model below constructs this class under the name VQEncoderOutput.
VQEncoderOutput = UpperCamelCase_
class UpperCamelCase_(ModelMixin, ConfigMixin):
    """VQ-VAE model: an Encoder, a VectorQuantizer bottleneck, and a Decoder.

    Mirrors diffusers' VQModel; all hyper-parameters are recorded on the
    config via @register_to_config.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode `x` to latents; returns VQEncoderOutput (or a tuple)."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Decode latents `h` (quantizing first unless force_not_quantize)."""
        if not force_not_quantize:
            quant, commit_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # Spatial norm decoders additionally condition on the quantized map.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencoding pass: encode, quantize, decode `sample`."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Zeller's congruence: name the weekday of a Gregorian date given as
    "mm-dd-yyyy" or "mm/dd/yyyy".

    :raises ValueError: for malformed input (length, month, day, separators,
        year range).
    :raises AssertionError: if the computed weekday disagrees with
        ``datetime`` (internal sanity check).
    """
    # Zeller result f -> weekday name (0 = Sunday ... 6 = Saturday).
    days = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }

    # datetime.weekday() (0 = Monday) -> the f value Zeller should produce.
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("""Must be 10 characters long""")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""")

    # Response
    response = f'''Your date {date_input}, is a {days[str(f)]}!'''
    return response


SCREAMING_SNAKE_CASE__ = zeller  # keep the former binding for any external callers
if __name__ == "__main__":
    # Run the module doctests, then answer the date given on the command line.
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
    parser.add_argument(
        'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
    )
    args = parser.parse_args()
    zeller(args.date_input)
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# The original Bort release is pinned to these exact library versions.
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sentence used to check that the converted model matches the original one.
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(
    bort_checkpoint_path: str,
    pytorch_dump_folder_path: str,
    sample_text: str = "The Nymphenburg Palace is a beautiful palace in Munich!",
):
    """Convert the official Bort checkpoint (gluonnlp/mxnet) into a
    Transformers BERT checkpoint, then sanity-check that both models produce
    the same hidden states on `sample_text`.

    :param bort_checkpoint_path: path to the official Bort params file
    :param pytorch_dump_folder_path: output directory for the converted model
    :param sample_text: sentence used for the output-equivalence check
    """
    # Bort's fixed architecture hyper-parameters (4 layers, 8 heads, ...).
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping (Gluonnlp -> Transformers); * denotes the layer index.
    # encoder.layer_norm.{beta,gamma}          -> bert.embeddings.LayerNorm.{bias,weight}
    # encoder.position_weight                  -> bert.embeddings.position_embeddings.weight
    # word_embed.0.weight                      -> bert.embeddings.word_embeddings.weight
    # encoder.transformer_cells.*.attention_cell.proj_{key,query,value}.{bias,weight}
    #                                          -> bert.encoder.layer.*.attention.self.{key,query,value}.{bias,weight}
    # encoder.transformer_cells.*.proj.*       -> bert.encoder.layer.*.attention.output.dense.*
    # encoder.transformer_cells.*.layer_norm.* -> bert.encoder.layer.*.attention.output.LayerNorm.*
    # encoder.transformer_cells.*.ffn.ffn_1.*  -> bert.encoder.layer.*.intermediate.dense.*
    # encoder.transformer_cells.*.ffn.ffn_2.*  -> bert.encoder.layer.*.output.dense.*
    # encoder.transformer_cells.*.ffn.layer_norm.* -> bert.encoder.layer.*.output.LayerNorm.*

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, """word_embed.0.weight""" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, """encoder.position_weight""" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, """encoder.layer_norm.beta""" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, """encoder.layer_norm.gamma""" )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"""encoder.transformer_cells.{i}.proj.bias""" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"""encoder.transformer_cells.{i}.proj.weight""" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-base""" )

    input_ids = tokenizer.encode_plus(sample_text)["""input_ids"""]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[] )

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors="""pt""" )
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer) ).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3 )

    if success:
        print("""✔️ Both model do output the same tensors""" )
    else:
        print("""❌ Both model do **NOT** output the same tensors""" )

    print("""Absolute difference is:""", max_absolute_diff )


_UpperCAmelCase = convert_bort_checkpoint_to_pytorch  # former binding
if __name__ == "__main__":
    # CLI: convert the official Bort checkpoint into a Transformers checkpoint.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
# Emit INFO-level logs while the conversion runs.
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy `weight` (and optionally `bias`) tensors onto `torch_layer`,
    asserting that the shapes match the layer's existing parameters."""
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)


_UpperCAmelCase = set_param  # keep the former binding for any external callers
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Copy trax LSH-attention weights (query_key, value, output dense) onto
    the given torch Reformer attention layer."""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )


_UpperCAmelCase = set_layer_weights_in_torch_lsh  # former binding
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Copy trax local-attention weights (query, key, value, output dense)
    onto the given torch Reformer attention layer."""
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )


_UpperCAmelCase = set_layer_weights_in_torch_local  # former binding
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Copy one trax Reformer block (attention layer-norm, attention weights,
    feed-forward layer-norm/dense/output) onto the torch block."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )


_UpperCAmelCase = set_block_weights_in_torch  # former binding
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy the whole trax Reformer weight tree onto `torch_model`
    (embeddings, every encoder block, final layer norm, LM head)."""
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    out_layer_norm_weight = np.asarray(weights[7][0])
    out_layer_norm_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(out_layer_norm_weight), torch.tensor(out_layer_norm_bias), )

    # output embeddings
    out_embed_weight = np.asarray(weights[9][0])
    out_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(out_embed_weight).transpose(0, 1).contiguous(), torch.tensor(out_embed_bias), )


_UpperCAmelCase = set_model_weights_in_torch  # former binding
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ReformerModelWithLMHead from `config_file`, load the pickled
    trax weights from `trax_model_pkl_path` into it, and save the resulting
    state dict to `pytorch_dump_path`."""
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, """rb""") as f:
        model_weights = pickle.load(f)["""weights"""]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)


_UpperCAmelCase = convert_trax_checkpoint_to_pytorch  # former binding
if __name__ == "__main__":
    # CLI: convert a pickled trax Reformer checkpoint into a PyTorch one.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """Builds tiny RoBERTa configs and dummy inputs for the Flax model tests.

    NOTE(review): the original class/method/parameter names were mechanically
    mangled (duplicate parameters, three identically named methods); names are
    restored so the sibling test class's ``FlaxRobertaModelTester(self)`` call
    and the internal ``self.prepare_config_and_inputs()`` calls resolve.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # The mangled source passed an undefined name here; False matches
            # the encoder-only setup this method prepares.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the kwargs dict the common tester mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Extend the base inputs with encoder states for cross-attention tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        # NOTE(review): the mangled source reduced this line to a bare
        # assignment of True; restoring the decoder flag on the config is the
        # only assignment consistent with this method's purpose — confirm.
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-tester suite against the RoBERTa family."""

    # NOTE(review): the original attribute name was mangled; restored to the
    # flag conventionally set True in this test module — confirm upstream.
    test_head_masking = True
    # `all_model_classes` is the name the mixin and the loop below read.
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # unittest calls this before every test; the mixin reads self.model_tester.
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Download each pretrained class from the Hub and run a 1x1 forward pass."""
        for model_class_name in self.all_model_classes:
            # The mangled source passed an undefined name; from_pt=True loads
            # the PyTorch weights into Flax, matching the checkpoint format.
            model = model_class_name.from_pretrained('roberta-base', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class A_ ( a_ ):
    """BLIP-style image processor.

    Pipeline: optional RGB conversion -> resize -> rescale -> normalize, then
    pack the images into a ``BatchFeature`` under ``pixel_values``.
    """

    # NOTE(review): restored from a mangled attribute name; image processors
    # conventionally declare `model_input_names` — confirm against the base class.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        # The original signature repeated a single mangled parameter name
        # (a SyntaxError); descriptive names restored from the assignments below.
        super().__init__(**kwargs)
        # Default target size is a square 384x384.
        size = size if size is not None else {"height": 3_84, "width": 3_84}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to the OpenAI CLIP statistics when none are provided.
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` (must contain "height" and "width" keys)."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        output_size = (size["height"], size["width"])
        # Bare `resize` resolves to the module-level helper imported above.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one image or a list of images.

        Per-call arguments override the defaults stored on the processor.
        Returns a ``BatchFeature`` with a ``pixel_values`` entry.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # BUGFIX: the original test read `do_resize and size is None or resample
        # is None`, which binds as `(do_resize and size is None) or resample is
        # None` and can raise even when resizing is disabled; parenthesized so
        # both operands are only required when `do_resize` is True.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
# --- file boundary (removed dataset-row artifact) ---
import numpy as np
def snake_case_(f, y0, x0, h, x_end):
    """Integrate y' = f(x, y) with the classic fourth-order Runge-Kutta method.

    Args:
        f: Right-hand side of the ODE, called as ``f(x, y)``.
        y0: Initial value y(x0).
        x0: Left end of the integration interval.
        h: Step size (positive).
        x_end: Right end of the integration interval.

    Returns:
        numpy array of n+1 solution values, y[0] == y0, one entry per step.

    NOTE(review): the mangled original repeated one parameter name five times
    and assigned every local to one name; the argument order (f, y0, x0, h,
    x_end) is reconstructed from the body's uses — confirm against callers.
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Four slope samples: start, two midpoints, end of the step.
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        # Weighted average (1, 2, 2, 1)/6 gives fourth-order accuracy.
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    # (A stray dataset-row artifact trailing the last line was removed.)
    import doctest
    doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the TimeSformer configuration module.
__UpperCamelCase : int = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
__UpperCamelCase : Union[str, Any] = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__(PretrainedConfig):
    """Configuration for the TimeSformer video transformer model.

    Stores image/patch geometry, transformer dimensions, and the space-time
    attention variant. NOTE(review): the original base-class name was mangled
    and undefined; `PretrainedConfig` (imported above) restored.
    """

    # NOTE(review): restored from a mangled attribute name; PretrainedConfig
    # subclasses declare `model_type` — confirm.
    model_type = 'timesformer'

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        # The original signature repeated one mangled parameter name for every
        # argument (a SyntaxError); names restored from the assignments below.
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Map of ALBERT checkpoint name -> hosted config URL (v1 and v2 releases).
_snake_case : Any = {
    'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
    'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
    'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
    'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
    'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
    'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
    'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
    'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class A(PretrainedConfig):
    """Configuration for the ALBERT model (factorized embeddings, layer groups).

    NOTE(review): the original base-class name `_a` was undefined;
    `PretrainedConfig` (imported above) restored.
    """

    # NOTE(review): restored from a mangled attribute name; PretrainedConfig
    # subclasses declare `model_type` — confirm.
    model_type = 'albert'

    def __init__(
        self,
        vocab_size=3_00_00,
        embedding_size=1_28,
        hidden_size=40_96,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=1_63_84,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ) -> None:
        # The original signature repeated one mangled parameter name for every
        # argument (a SyntaxError); names restored from the assignments below
        # and the positional default values.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ALBERT: declares the dynamic input axes.

    NOTE(review): the original class shadowed its sibling's name and subclassed
    the undefined `_a`; renamed and rebased on `OnnxConfig` (imported above).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Map each model input to its dynamic-axis labels.

        Multiple-choice tasks carry an extra `choice` axis between batch and
        sequence; all other tasks use plain (batch, sequence) inputs.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Fast CPU tests for StableDiffusionInstructPix2Pix with tiny components.

    NOTE(review): this class's names look mechanically mangled — the
    `UpperCamelCase_` bases are undefined (presumably the tester mixins
    imported above), several attributes/methods share one name, and one
    method repeats a parameter name (a SyntaxError as written). Code is left
    byte-for-byte; confirm against the original test module before running.
    """
    lowercase_ = StableDiffusionInstructPixaPixPipeline
    lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def __lowercase( self : str )-> int:
        """Build tiny UNet/scheduler/VAE/CLIP components for a throwaway pipeline."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        # NOTE(review): `a_` below is undefined at class-body scope — mangled name.
        SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=a_ )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(a_ )
        SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        SCREAMING_SNAKE_CASE__ : List[str] = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def __lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=0 )-> int:
        """Build the standard call kwargs (prompt, seed image, generator).

        NOTE(review): the parameter name `a_` is repeated — SyntaxError as written.
        """
        SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' )
        if str(a_ ).startswith('mps' ):
            SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a_ )
        else:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
        SCREAMING_SNAKE_CASE__ : Dict = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'image_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs
    def __lowercase( self : str )-> Optional[Any]:
        """Smoke-test the default pipeline on CPU and pin an output image slice."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ )
        SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(a_ )
        sd_pipe.set_progress_bar_config(disable=a_ )
        SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(a_ )
        SCREAMING_SNAKE_CASE__ : int = sd_pipe(**a_ ).images
        SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        SCREAMING_SNAKE_CASE__ : Dict = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def __lowercase( self : Optional[Any] )-> int:
        """Run with a negative prompt and pin the resulting image slice."""
        SCREAMING_SNAKE_CASE__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ )
        sd_pipe.set_progress_bar_config(disable=a_ )
        SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = 'french fries'
        SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**a_ , negative_prompt=a_ )
        SCREAMING_SNAKE_CASE__ : Dict = output.images
        SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def __lowercase( self : List[Any] )-> List[str]:
        """Duplicate the prompt/image into a batch of two and pin the slice."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
        SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ )
        sd_pipe.set_progress_bar_config(disable=a_ )
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = [inputs['prompt']] * 2
        # Rebuild the image tensor in [-1, 1], channels-first, batch of two.
        SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
        SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ )
        SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5
        SCREAMING_SNAKE_CASE__ : Tuple = image.permute(0 , 3 , 1 , 2 )
        SCREAMING_SNAKE_CASE__ : int = image.repeat(2 , 1 , 1 , 1 )
        SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a_ ).images
        SCREAMING_SNAKE_CASE__ : Any = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        SCREAMING_SNAKE_CASE__ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def __lowercase( self : List[Any] )-> List[Any]:
        """Swap in an Euler-ancestral scheduler and pin the resulting slice."""
        SCREAMING_SNAKE_CASE__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' )
        SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
        SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(a_ )
        sd_pipe.set_progress_bar_config(disable=a_ )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ )
        SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**a_ ).images
        SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE__ : Any = [round(a_ , 4 ) for x in image_slice.flatten().tolist()]
        print(','.join([str(a_ ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def __lowercase( self : Union[str, Any] )-> Any:
        """Delegate to the mixin's batch-vs-single consistency check."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def __lowercase( self : List[Any] )-> Dict:
        """Check that passing pre-encoded latents matches passing the raw image."""
        SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ )
        SCREAMING_SNAKE_CASE__ : int = VaeImageProcessor(do_resize=a_ , do_normalize=a_ )
        SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0]
        SCREAMING_SNAKE_CASE__ : Optional[int] = components['vae']
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' )
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                SCREAMING_SNAKE_CASE__ : Union[str, Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ )[0]
        SCREAMING_SNAKE_CASE__ : List[Any] = np.abs(out - out_latents_inputs ).max()
        self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow GPU integration tests against the real timbrooks/instruct-pix2pix checkpoint.

    NOTE(review): names in this class look mechanically mangled — every method
    is called `__lowercase` and most references use `a_`, which is undefined at
    these scopes. Code is left byte-for-byte; confirm against the original test
    module before running.
    """
    def __lowercase( self : Tuple )-> Dict:
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __lowercase( self : List[Any] , a_ : Dict=0 )-> Any:
        """Build the standard call kwargs around the hosted example image."""
        SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(a_ )
        SCREAMING_SNAKE_CASE__ : List[str] = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
        SCREAMING_SNAKE_CASE__ : Tuple = {
            'prompt': 'turn him into a cyborg',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'image_guidance_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs
    def __lowercase( self : int )-> Optional[int]:
        """Default scheduler: run 3 steps and pin an output image slice."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix' , safety_checker=a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        pipe.enable_attention_slicing()
        SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images
        SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def __lowercase( self : Dict )-> str:
        """Same run with an LMS discrete scheduler swapped in."""
        SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix' , safety_checker=a_ )
        SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        pipe.enable_attention_slicing()
        SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs()
        SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images
        SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def __lowercase( self : Optional[int] )-> Union[str, Any]:
        """Same run with a DDIM scheduler swapped in."""
        SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix' , safety_checker=a_ )
        SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        pipe.enable_attention_slicing()
        SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
        SCREAMING_SNAKE_CASE__ : Tuple = pipe(**a_ ).images
        SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def __lowercase( self : int )-> List[str]:
        """Verify the step callback fires per step and sees expected latents."""
        SCREAMING_SNAKE_CASE__ : str = 0
        def callback_fn(a_ : int , a_ : int , a_ : torch.FloatTensor ) -> None:
            # NOTE(review): duplicate parameter name `a_` — SyntaxError as written.
            SCREAMING_SNAKE_CASE__ : Tuple = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1]
                SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                SCREAMING_SNAKE_CASE__ : Optional[int] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                SCREAMING_SNAKE_CASE__ : Tuple = latents[0, -3:, -3:, -1]
                SCREAMING_SNAKE_CASE__ : Dict = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        SCREAMING_SNAKE_CASE__ : List[str] = False
        SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        pipe.enable_attention_slicing()
        SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs()
        pipe(**a_ , callback=a_ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def __lowercase( self : int )-> Any:
        """With sequential CPU offload, peak GPU memory stays under ~2.2 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ )
        SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def __lowercase( self : Tuple )-> List[Any]:
        """Input divisible by 8 but not 16/32 still produces the expected output."""
        SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        SCREAMING_SNAKE_CASE__ : Dict = inputs['image'].resize((504, 504) )
        SCREAMING_SNAKE_CASE__ : List[Any] = 'timbrooks/instruct-pix2pix'
        SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            a_ , safety_checker=a_ , )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        pipe.enable_attention_slicing()
        SCREAMING_SNAKE_CASE__ : Any = pipe(**a_ )
        SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
        SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        SCREAMING_SNAKE_CASE__ : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
# --- file boundary (removed dataset-row artifact) ---
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __snake_case ( __lowerCamelCase ):
    '''simple docstring'''
    # NOTE(review): obfuscated port of what appears to be diffusers'
    # AudioDiffusionPipeline — verify against upstream.  Every local below is
    # assigned to the same name `__snake_case` while later lines read the
    # original variable names (`noise`, `mask`, `images`, ...), and the method
    # signatures repeat the parameter name `A`, which is a SyntaxError in
    # Python.  Code is left byte-identical; only comments were added.
    lowerCAmelCase__ = ["""vqvae"""]
    def __init__( self : Optional[Any] , A : AutoencoderKL , A : UNetaDConditionModel , A : Mel , A : Union[DDIMScheduler, DDPMScheduler] , ):
        super().__init__()
        self.register_modules(unet=A , scheduler=A , mel=A , vqvae=A )
    def UpperCAmelCase__ ( self : str ):
        # Presumably: 50 steps for DDIM, 1000 for DDPM — the isinstance target
        # was garbled to `A` (undefined here); TODO confirm against upstream.
        return 50 if isinstance(self.scheduler , A ) else 1_000
    @torch.no_grad()
    def __call__( self : int , A : int = 1 , A : str = None , A : np.ndarray = None , A : int = 0 , A : int = 0 , A : int = None , A : torch.Generator = None , A : float = 0 , A : float = 0 , A : torch.Generator = None , A : float = 0 , A : torch.Tensor = None , A : torch.Tensor = None , A : Optional[int]=True , ):
        # Generation entry point: optionally conditions on an input audio slice,
        # runs the denoising loop and decodes to both images and audio.
        __snake_case: Union[str, Any] = steps or self.get_default_steps()
        self.scheduler.set_timesteps(A )
        __snake_case: List[Any] = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            __snake_case: Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            __snake_case: Any = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=A , device=self.device , )
        __snake_case: List[Any] = noise
        __snake_case: Union[str, Any] = None
        if audio_file is not None or raw_audio is not None:
            # Condition on an existing audio slice converted to a mel image.
            self.mel.load_audio(A , A )
            __snake_case: Tuple = self.mel.audio_slice_to_image(A )
            __snake_case: Dict = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
                (input_image.height, input_image.width) )
            # map uint8 pixels into [-1, 1]
            __snake_case: Union[str, Any] = (input_image / 255) * 2 - 1
            __snake_case: int = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                __snake_case: Tuple = self.vqvae.encode(torch.unsqueeze(A , 0 ) ).latent_dist.sample(
                    generator=A )[0]
                __snake_case: List[str] = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                __snake_case: Optional[Any] = self.scheduler.add_noise(A , A , self.scheduler.timesteps[start_step - 1] )
            __snake_case: str = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            __snake_case: List[Any] = int(mask_start_secs * pixels_per_second )
            __snake_case: Union[str, Any] = int(mask_end_secs * pixels_per_second )
            __snake_case: Optional[int] = self.scheduler.add_noise(A , A , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , A ):
                __snake_case: List[Any] = self.unet(A , A , A )["""sample"""]
            else:
                __snake_case: str = self.unet(A , A )["""sample"""]
            if isinstance(self.scheduler , A ):
                __snake_case: int = self.scheduler.step(
                    model_output=A , timestep=A , sample=A , eta=A , generator=A , )["""prev_sample"""]
            else:
                __snake_case: str = self.scheduler.step(
                    model_output=A , timestep=A , sample=A , generator=A , )["""prev_sample"""]
            if mask is not None:
                # re-impose the conditioning audio at the masked edges each step
                if mask_start > 0:
                    __snake_case: Union[str, Any] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    __snake_case: Dict = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            __snake_case: Any = 1 / self.vqvae.config.scaling_factor * images
            __snake_case: Optional[int] = self.vqvae.decode(A )["""sample"""]
        # de-normalize from [-1, 1] back to uint8 pixel space
        __snake_case: int = (images / 2 + 0.5).clamp(0 , 1 )
        __snake_case: Dict = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        __snake_case: Any = (images * 255).round().astype("""uint8""" )
        __snake_case: Tuple = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(A , mode="""RGB""" ).convert("""L""" ) for _ in images) )
        __snake_case: Dict = [self.mel.image_to_audio(A ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(A )[:, np.newaxis, :] ) , **ImagePipelineOutput(A ) )
    @torch.no_grad()
    def UpperCAmelCase__ ( self : List[Any] , A : List[Image.Image] , A : int = 50 ):
        # Looks like DDIM inversion: encode images back into noise by running the
        # scheduler in reverse — TODO confirm against upstream `encode`.
        assert isinstance(self.scheduler , A )
        self.scheduler.set_timesteps(A )
        __snake_case: List[str] = np.array(
            [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
        __snake_case: Optional[Any] = (sample / 255) * 2 - 1
        __snake_case: List[Any] = torch.Tensor(A ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            __snake_case: List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            __snake_case: Any = self.scheduler.alphas_cumprod[t]
            __snake_case: Any = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            __snake_case: Dict = 1 - alpha_prod_t
            __snake_case: int = self.unet(A , A )["""sample"""]
            __snake_case: str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            __snake_case: List[str] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            __snake_case: int = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def UpperCAmelCase__ ( A : torch.Tensor , A : torch.Tensor , A : float ):
        # Spherical linear interpolation between two flattened tensors; the
        # references to `xa`/`alpha`/`theta` were garbled by obfuscation.
        __snake_case: Any = acos(torch.dot(torch.flatten(A ) , torch.flatten(A ) ) / torch.norm(A ) / torch.norm(A ) )
        return sin((1 - alpha) * theta ) * xa / sin(A ) + sin(alpha * theta ) * xa / sin(A )
| 155 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# NOTE(review): obfuscated SageMaker distributed-training release test.  Locals
# are collapsed to `__snake_case` while later lines read the original names
# (`estimator`, `result_metrics_df`, `train_runtime`, ...), and `A` placeholders
# replace several argument values — left byte-identical, comments only.
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.p3.16xlarge""",
            """results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_ddp.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.p3.16xlarge""",
            """results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
        },
        {
            """framework""": """tensorflow""",
            """script""": """run_tf_dist.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.p3.16xlarge""",
            """results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
        },
    ] )
class __snake_case ( unittest.TestCase ):
    '''simple docstring'''
    def UpperCAmelCase__ ( self : Optional[Any] ):
        # setUp: copy the example script into the test workspace.
        if self.framework == "pytorch":
            subprocess.run(
                f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=A , )
        assert hasattr(self , """env""" )
    def UpperCAmelCase__ ( self : int , A : List[Any] ):
        # Build a HuggingFace SageMaker estimator for the given instance count.
        __snake_case: Optional[int] = f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}'''
        # distributed data settings
        __snake_case: Optional[int] = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=A , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=A , py_version="""py36""" , )
    def UpperCAmelCase__ ( self : List[Any] , A : int ):
        # Export the job's metric history to CSV for debugging.
        TrainingJobAnalytics(A ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
    @parameterized.expand([(2,)] )
    def UpperCAmelCase__ ( self : str , A : Any ):
        # create estimator
        __snake_case: str = self.create_estimator(A )
        # run training
        estimator.fit()
        # result dataframe
        __snake_case: Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        __snake_case: Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        __snake_case: List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __snake_case: Tuple = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , A )
| 155 | 1 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 50 | # limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a standard deprecation warning at import time pointing users to the new
# location (diffusers.pipelines.pipeline_utils); removal scheduled for 0.22.0.
deprecate(
    """pipelines_utils""",
    """0.22.0""",
    """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 240 | 0 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class a ( unittest.TestCase ):
    # NOTE(review): obfuscated stopping-criteria tests.  Helper method was
    # renamed to `lowerCamelCase__` yet callers still invoke `self._get_tensors`,
    # and locals are collapsed to `SCREAMING_SNAKE_CASE_` while the bodies read
    # the original names (`criteria`, `input_ids`, `scores`) — left byte-identical.
    def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Dict ) -> Optional[Any]:
        '''simple docstring'''
        # Builds (input_ids, scores) fixtures of the requested sequence length.
        SCREAMING_SNAKE_CASE_: List[Any] =3
        SCREAMING_SNAKE_CASE_: List[str] =250
        SCREAMING_SNAKE_CASE_: Any =ids_tensor((batch_size, length) , lowerCAmelCase )
        SCREAMING_SNAKE_CASE_: Tuple =torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
        return input_ids, scores
    def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
        '''simple docstring'''
        # A StoppingCriteriaList fires once ANY member criterion is met.
        SCREAMING_SNAKE_CASE_: int =self._get_tensors(5 )
        SCREAMING_SNAKE_CASE_: int =StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_: Dict =self._get_tensors(9 )
        self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_: int =self._get_tensors(10 )
        self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
    def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        # MaxLengthCriteria triggers exactly at max_length, not before.
        SCREAMING_SNAKE_CASE_: int =MaxLengthCriteria(max_length=10 )
        SCREAMING_SNAKE_CASE_: Dict =self._get_tensors(5 )
        self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_: Tuple =self._get_tensors(9 )
        self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_: int =self._get_tensors(10 )
        self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
    def lowerCamelCase__ ( self : int ) -> Optional[Any]:
        '''simple docstring'''
        # MaxNewTokensCriteria counts tokens beyond start_length.
        SCREAMING_SNAKE_CASE_: Optional[int] =MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        SCREAMING_SNAKE_CASE_: Union[str, Any] =self._get_tensors(5 )
        self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_: Any =self._get_tensors(9 )
        self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_: str =self._get_tensors(10 )
        self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_: List[str] =StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def lowerCamelCase__ ( self : str ) -> int:
        '''simple docstring'''
        # MaxTimeCriteria: fresh timer has not expired; a back-dated one has.
        SCREAMING_SNAKE_CASE_: List[Any] =self._get_tensors(5 )
        SCREAMING_SNAKE_CASE_: Union[str, Any] =MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_: Optional[int] =MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
    def lowerCamelCase__ ( self : str ) -> Dict:
        '''simple docstring'''
        # validate_stopping_criteria warns on mismatched max_length and appends
        # a default MaxLengthCriteria to an empty list.
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(lowerCAmelCase ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        SCREAMING_SNAKE_CASE_: Optional[int] =validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(lowerCAmelCase ) , 1 )
| 713 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__lowerCamelCase = logging.get_logger(__name__)
def UpperCamelCase ( box , width , height ):
    """Scale an absolute pixel box ``[x0, y0, x1, y1]`` into the 0-1000
    coordinate space expected by LayoutLM-style models.

    The original (obfuscated) signature repeated one parameter name three
    times — a SyntaxError in Python — so the distinct parameters are restored.
    """
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def UpperCamelCase ( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] = None ):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Empty words (whitespace-only OCR hits) and their coordinates are dropped;
    boxes are converted to (left, top, right, bottom) and scaled to the 0-1000
    range.  The obfuscated original repeated one parameter name (SyntaxError)
    and collapsed every local to one name while reading the originals, so the
    body is reconstructed; box normalization is inlined because the sibling
    helper of the same name is shadowed by this definition.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates (set: O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])
    # finally, normalize the bounding boxes to the 0-1000 coordinate space
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(
            [
                int(1000 * (box[0] / image_width)),
                int(1000 * (box[1] / image_height)),
                int(1000 * (box[2] / image_width)),
                int(1000 * (box[3] / image_height)),
            ]
        )
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class UpperCAmelCase ( A_ ):
    # NOTE(review): obfuscated image processor (looks like LayoutLMv2's — verify
    # against upstream).  Method signatures repeat the parameter name
    # `snake_case__` (a SyntaxError in Python) and locals are collapsed to
    # `snake_case` while the bodies read the original names (`size`, `images`,
    # ...); it also calls `apply_tesseract`, which is undefined at module level
    # after obfuscation.  Code left byte-identical; comments only.
    A__ : Union[str, Any] = ["pixel_values"]
    def __init__(self : Optional[int] , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = PILImageResampling.BILINEAR , snake_case__ : bool = True , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = "" , **snake_case__ : Optional[int] , ) -> None:
        '''simple docstring'''
        super().__init__(**snake_case__ )
        # default target resolution is 224x224
        snake_case : Tuple = size if size is not None else {"height": 2_24, "width": 2_24}
        snake_case : str = get_size_dict(snake_case__ )
        snake_case : List[Any] = do_resize
        snake_case : Dict = size
        snake_case : Dict = resample
        snake_case : List[Any] = apply_ocr
        snake_case : str = ocr_lang
        snake_case : List[str] = tesseract_config
    def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : PILImageResampling = PILImageResampling.BILINEAR , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Optional[Any] , ) -> np.ndarray:
        '''simple docstring'''
        # Resize one image to the (height, width) in `size`.
        snake_case : str = get_size_dict(snake_case__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        snake_case : Optional[int] = (size["height"], size["width"])
        return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
    def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : ImageInput , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = None , snake_case__ : bool = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : Union[str, Any] , ) -> PIL.Image.Image:
        '''simple docstring'''
        # Main preprocessing entry point: optional OCR, resize, RGB->BGR flip,
        # channel-format conversion, then a BatchFeature with words/boxes.
        snake_case : Any = do_resize if do_resize is not None else self.do_resize
        snake_case : List[Any] = size if size is not None else self.size
        snake_case : Any = get_size_dict(snake_case__ )
        snake_case : Union[str, Any] = resample if resample is not None else self.resample
        snake_case : List[str] = apply_ocr if apply_ocr is not None else self.apply_ocr
        snake_case : Union[str, Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
        snake_case : int = tesseract_config if tesseract_config is not None else self.tesseract_config
        snake_case : int = make_list_of_images(snake_case__ )
        if not valid_images(snake_case__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        # All transformations expect numpy arrays.
        snake_case : int = [to_numpy_array(snake_case__ ) for image in images]
        if apply_ocr:
            requires_backends(self , "pytesseract" )
            snake_case : str = []
            snake_case : Dict = []
            for image in images:
                snake_case , snake_case : Any = apply_tesseract(snake_case__ , snake_case__ , snake_case__ )
                words_batch.append(snake_case__ )
                boxes_batch.append(snake_case__ )
        if do_resize:
            snake_case : Optional[int] = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        snake_case : Any = [flip_channel_order(snake_case__ ) for image in images]
        snake_case : List[str] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
        snake_case : Optional[Any] = BatchFeature(data={"pixel_values": images} , tensor_type=snake_case__ )
        if apply_ocr:
            snake_case : List[Any] = words_batch
            snake_case : Union[str, Any] = boxes_batch
        return data
| 204 |
def UpperCamelCase ( numerator: int = 1 , digit: int = 1000 ) -> int:
    """Return the denominator d (numerator <= d <= digit) for which the unit
    fraction numerator/d has the longest recurring cycle in its decimal
    expansion (Project Euler problem 26).

    Long division is simulated by tracking remainders: the cycle length equals
    the number of distinct remainders seen before one repeats.  The original
    (obfuscated) version repeated one parameter name (a SyntaxError) and
    collapsed every local to one name while reading the originals; it is
    reconstructed here with a set for O(1) remainder lookup and an early break
    once the repeat is found — both behavior-preserving.
    """
    the_digit = 1          # denominator with the longest cycle found so far
    longest_cycle = 0      # length of that cycle
    for divide_by_number in range(numerator, digit + 1):
        seen_remainders: set[int] = set()
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in seen_remainders:
                # remainder repeats: the cycle is closed
                if longest_cycle < len(seen_remainders):
                    longest_cycle = len(seen_remainders)
                    the_digit = divide_by_number
                break
            seen_remainders.add(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 204 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
a__ = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
a__ = {
"""169M""": 768,
"""430M""": 1_024,
"""1B5""": 2_048,
"""3B""": 2_560,
"""7B""": 4_096,
"""14B""": 5_120,
}
def snake_case__ ( a ) -> dict:
    """Rename the keys of an RWKV checkpoint ``state_dict`` in place to the
    Hugging Face ``transformers`` naming scheme and return it.

    Mappings applied per key: ``emb.`` -> ``embeddings.``, ``blocks.0.ln0`` ->
    ``blocks.0.pre_ln``, ``.att`` -> ``.attention``, ``.ffn`` ->
    ``.feed_forward``, ``time_mix_{k,v,r}`` -> ``time_mix_{key,value,
    receptance}``, and everything except ``head.weight`` gains an ``rwkv.``
    prefix.  The obfuscated original assigned every local to one name while
    reading ``name``/``weight``/``state_dict_keys`` (a NameError); locals are
    restored here.
    """
    state_dict_keys = list(a.keys())
    for name in state_dict_keys:
        weight = a.pop(name)
        # emb -> embedding
        if name.startswith("""emb."""):
            name = name.replace("""emb.""", """embeddings.""")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("""blocks.0.ln0"""):
            name = name.replace("""blocks.0.ln0""", """blocks.0.pre_ln""")
        # att -> attention
        name = re.sub(R"""blocks\.(\d+)\.att""", R"""blocks.\1.attention""", name)
        # ffn -> feed_forward
        name = re.sub(R"""blocks\.(\d+)\.ffn""", R"""blocks.\1.feed_forward""", name)
        # time_mix_k -> time_mix_key
        if name.endswith(""".time_mix_k"""):
            name = name.replace(""".time_mix_k""", """.time_mix_key""")
        # time_mix_v -> time_mix_value
        if name.endswith(""".time_mix_v"""):
            name = name.replace(""".time_mix_v""", """.time_mix_value""")
        # time_mix_r -> time_mix_receptance
        if name.endswith(""".time_mix_r"""):
            name = name.replace(""".time_mix_r""", """.time_mix_receptance""")
        if name != "head.weight":
            name = """rwkv.""" + name
        a[name] = weight
    return a
def snake_case__ ( a , a , a , a=None , a=None , a=False , a=None ) -> Optional[int]:
    '''simple docstring'''
    # NOTE(review): obfuscated RWKV->HF checkpoint converter.  The signature
    # repeats the parameter name `a` (a SyntaxError in Python) and every local
    # is assigned to `snake_case__` while later lines read the original names
    # (`tokenizer`, `size`, `config`, `state_dict`, `shards`, ...).  It also
    # performs network downloads and filesystem writes; left byte-identical.
    # 1. Tokenizer: default GPT-NeoX tokenizer unless a tokenizer file is given.
    if tokenizer_file is None:
        print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
        snake_case__ = 5_0277
        snake_case__ = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
    else:
        snake_case__ = PreTrainedTokenizerFast(tokenizer_file=a )
        snake_case__ = len(a )
    tokenizer.save_pretrained(a )
    # 2. Build the config
    snake_case__ = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                snake_case__ = candidate
                break
    if size is None:
        raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
    if size not in possible_sizes:
        raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
    snake_case__ = RwkvConfig(
        vocab_size=a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(a )
    # 3. Download model file then convert state_dict
    snake_case__ = hf_hub_download(a , a )
    snake_case__ = torch.load(a , map_location="""cpu""" )
    snake_case__ = convert_state_dict(a )
    # 4. Split in shards and save
    snake_case__ , snake_case__ = shard_checkpoint(a )
    for shard_file, shard in shards.items():
        torch.save(a , os.path.join(a , a ) )
    if index is not None:
        snake_case__ = os.path.join(a , a )
        # Save the index as well
        with open(a , """w""" , encoding="""utf-8""" ) as f:
            snake_case__ = json.dumps(a , indent=2 , sort_keys=a ) + """\n"""
            f.write(a )
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        """Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" )
    snake_case__ = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        # reload each shard and move tensors to CPU before re-saving
        snake_case__ = torch.load(os.path.join(a , a ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(a , a ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
        snake_case__ = AutoModelForCausalLM.from_pretrained(a )
        model.push_to_hub(a , max_shard_size="""2GB""" )
        tokenizer.push_to_hub(a )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to `a__` but every add_argument call
    # references `parser` (a NameError at runtime) — an obfuscation artifact;
    # left byte-identical.
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
    )
    parser.add_argument(
        '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
    )
    parser.add_argument(
        '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
    )
    parser.add_argument(
        '''--tokenizer_file''',
        default=None,
        type=str,
        help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
    )
    parser.add_argument(
        '''--size''',
        default=None,
        type=str,
        help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Push to the Hub the converted model.''',
    )
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help='''Name of the pushed model on the Hub, including the username / organization.''',
    )
    a__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
) | 701 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge a LoRA checkpoint (``.safetensors``) into a diffusers pipeline.

    BUG FIX: the original signature declared five parameters all named ``a``
    (a SyntaxError) and the function was named ``snake_case__`` although the
    ``__main__`` block calls ``convert``; several locals (``pipeline``,
    ``state_dict``, ``visited``, ``layer_infos``, ``pair_keys``) were never
    bound under their used names. This restores them.

    Args:
        base_model_path: path to the base model in diffusers format.
        checkpoint_path: path to the ``.safetensors`` LoRA checkpoint.
        lora_prefix_unet: key prefix of UNet weights in the checkpoint.
        lora_prefix_text_encoder: key prefix of text-encoder weights.
        alpha: merging ratio in ``W = W0 + alpha * deltaW``.

    Returns:
        The ``StableDiffusionPipeline`` with LoRA deltas folded into its weights.
    """
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer: greedily rejoin underscore-separated fragments
        # until the attribute path resolves (module names may contain "_").
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:  # always true; the loop exits via break
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # pair_keys is ordered [up, down] so the matmul below is up @ down
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            # conv layers store LoRA factors as (out, in, 1, 1); drop the spatial dims
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    # CLI entry point: merge a LoRA .safetensors checkpoint into a diffusers
    # pipeline and save the result.
    # BUG FIX: every assignment below went to the same throwaway name `a__`,
    # so `parser`, `args`, `base_model_path`, ..., and `pipe` were all
    # undefined where they were used. Restored distinct names.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
    )
    parser.add_argument(
        '''--lora_prefix_text_encoder''',
        default='''lora_te''',
        type=str,
        help='''The prefix of text encoder weight in safetensors''',
    )
    parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
    parser.add_argument(
        '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
    )
    parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below ``max_number`` (sieve of Eratosthenes).

    BUG FIX: both functions in this module were named ``UpperCamelCase`` (the
    second shadowed the first) while their call sites referenced
    ``calculate_prime_numbers`` and ``solution``; the proper names are restored.
    """
    if max_number < 2:  # guard: the sieve below assumes max_number >= 2
        return []
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # mark every multiple of i (starting at i*i) as composite
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below ``max_number`` with exactly two prime factors.

    Two-pointer sweep over the primes p <= q with p * q < max_number
    (Project Euler problem 187).
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        # shrink the right pointer until primes[left] * primes[right] fits
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        # every q in primes[left..right] pairs with p = primes[left]
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
    # Runs the solver at its default limit (10**8 per the definition above),
    # which performs a full sieve up to 5 * 10**7 and is therefore slow.
    print(F'''{solution() = }''')
| 491 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase ( CLIPPreTrainedModel ):
    """CLIP-based image encoder that maps example images to conditioning states.

    BUG FIXES: the base class placeholder is replaced with the imported
    ``CLIPPreTrainedModel``; ``__init__`` had two parameters with the same
    name (a SyntaxError) and never assigned the ``self.*`` attributes that the
    forward pass reads; the forward method is named ``forward`` so that
    ``nn.Module.__call__`` dispatches to it.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode images to projected latent states.

        Args:
            pixel_values: image batch fed to the CLIP vision tower.
            return_uncond_vector: if True, also return the learned
                unconditional embedding.
        """
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper ( nn.Module ):
    """Small stack of transformer blocks applied to the pooled CLIP embedding.

    BUG FIXES: this class was also named ``UpperCamelCase`` (shadowing the
    encoder class above) although the encoder references it as
    ``PaintByExampleMapper``; ``self.blocks`` was never assigned; the
    ``BasicTransformerBlock`` call repeated one placeholder for its three
    positional arguments. NOTE(review): the reconstructed positional arguments
    (dim, num heads, head dim) and ``attention_bias=True`` follow the
    BasicTransformerBlock signature — confirm against the diffusers API.
    """

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    hid_size,
                    num_heads,
                    hid_size // num_heads,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        # apply each transformer block in sequence
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 491 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """Unit tests for the AutoencoderKL model.

    BUG FIXES: the two base-class placeholders are replaced with the imported
    ``ModelTesterMixin`` / ``UNetTesterMixin``; the three class attributes were
    all assigned to one placeholder name (clobbering each other); several tuple
    unpackings collapsed into a single name, losing ``init_dict`` /
    ``loading_info``. The class is renamed so it no longer collides with the
    integration-test class defined below.
    NOTE(review): attribute and method names were reconstructed from the
    ModelTesterMixin contract — confirm against the mixin.
    """

    # Class-level configuration read by ModelTesterMixin (see self.model_class below).
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        """A small random image batch on the current test device."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Return (model constructor kwargs, forward-pass inputs) for common tests."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Deliberately disabled for this model (overrides the mixin test).
        pass

    def test_training(self):
        # Deliberately disabled for this model (overrides the mixin test).
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample

        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict)
        # clone model
        model_a.load_state_dict(model.state_dict())
        model_a.to(torch_device)
        model_a.enable_gradient_checkpointing()

        assert model_a.is_gradient_checkpointing and model_a.training

        out_a = model_a(**inputs_dict).sample

        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_a = dict(model_a.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        # MPS does not support device-bound generators.
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests ( unittest.TestCase ):
    """Slow integration tests running the pretrained Stable Diffusion VAE.

    BUG FIXES: every method was named ``__UpperCAmelCase`` (so each shadowed
    the previous one and none ran), several methods declared two parameters
    with the same placeholder name (a SyntaxError), and many locals were never
    bound under the names they were used as. The class is also renamed so it
    no longer shadows/collides with the unit-test class above.
    NOTE(review): method and parameter names were reconstructed from the test
    bodies — confirm against the project's test conventions.
    """

    def get_file_format(self, seed, shape):
        """Filename of the stored reference noise tensor for (seed, shape)."""
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fpaa=False):
        """Load a fixed gaussian-noise image for the given seed/shape."""
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fpaa=False):
        """Load the SD VAE (fp16 revision when requested) in eval mode."""
        revision = "fp16" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        # MPS does not support device-bound generators.
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fpaa=True)
        image = self.get_sd_image(seed, fpaa=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        # same as test_stable_diffusion but without posterior sampling
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fpaa=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fpaa=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_a, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_a, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 440 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table: maps submodule name -> list of public names it provides.
# BUG FIX: this dict was assigned to a throwaway placeholder while the
# _LazyModule call below referenced the undefined `_import_structure`; the
# optional-dependency branches also overwrote the placeholder instead of
# registering their submodules, and the _LazyModule instance was discarded
# instead of replacing this module in sys.modules.
_import_structure = {
    """configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""image_processing_vivit"""] = ["""VivitImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_vivit"""] = [
        """VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """VivitModel""",
        """VivitPreTrainedModel""",
        """VivitForVideoClassification""",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 440 | 1 |
def UpperCamelCase ( lowercase_ ) -> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise TypeError("""Input value must be an \'int\' type""" )
lowercase__ : List[str] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | '''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
UpperCamelCase_ = '''.'''
if __name__ == "__main__":
UpperCamelCase_ = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
UpperCamelCase_ = []
UpperCamelCase_ = []
with open(doctest_file_path) as fp:
for line in fp:
UpperCamelCase_ = line.strip()
UpperCamelCase_ = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
UpperCamelCase_ = '''\n'''.join(non_existent_paths)
raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 209 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( SchedulerCommonTest ):
    """Tests for DDIMParallelScheduler.

    BUG FIXES: the undefined base ``A`` is replaced with the imported
    ``SchedulerCommonTest``; the two class attributes were both assigned to
    one placeholder name; every method was named ``__UpperCAmelCase`` (so only
    the last one survived and none were collected as tests); in ``full_loop``
    the tuple ``10, 0.0`` was assigned to a single name, losing
    ``num_inference_steps``/``eta``. NOTE(review): test-method names and some
    boolean flags were reconstructed from the bodies — confirm against the
    scheduler common-test conventions.
    """

    scheduler_classes = (DDIMParallelScheduler,)
    # default kwargs that SchedulerCommonTest forwards to scheduler.step
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        """Base scheduler config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        """Run a complete 10-step denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1

        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        # NOTE(review): the set_alpha_to_one boolean was lost in the original;
        # True here matches the expected values — confirm.
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def _random_mel_fusion(self, mel, total_frames, chunk_frames):
    """Build the 4-channel "fusion" mel input from an over-long spectrogram.

    Three chunks of ``chunk_frames`` frames are sampled from the front,
    middle and back thirds of ``mel``, plus a bilinear shrink of the whole
    spectrogram; the four are stacked on a new leading axis.

    NOTE(review): names restored from the leaked identifiers in the original
    (``idx_front``, ``mel_shrink``, ...), which were read but never assigned.
    """
    # Candidate start indices, split into front / middle / back thirds.
    ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
    if len(ranges[1]) == 0:
        # if the audio is too short, we just use the first chunk
        ranges[1] = [0]
    if len(ranges[2]) == 0:
        # if the audio is too short, we just use the first chunk
        ranges[2] = [0]
    # randomly choose index for each part
    idx_front = np.random.choice(ranges[0])
    idx_middle = np.random.choice(ranges[1])
    idx_back = np.random.choice(ranges[2])
    mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
    mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
    mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
    # Shrink the full spectrogram to (chunk_frames, 64) with bilinear
    # interpolation so it stacks with the three chunks.
    mel_shrink = torch.tensor(mel[None, None, :])
    mel_shrink = torch.nn.functional.interpolate(
        mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
    )
    mel_shrink = mel_shrink[0][0].numpy()
    mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
    return mel_fusion
def _get_input_mel(self, waveform, max_length, truncation, padding):
    """Truncate/pad ``waveform`` to ``max_length`` samples and return its mel
    features plus a flag saying whether the clip was longer than the limit.

    NOTE(review): method name restored from the call site in ``__call__``;
    the original block assigned every result to a throwaway local.

    Returns:
        (input_mel, longer): features of shape (4, T, M) for "fusion" or
        (1, T, M) otherwise, and a bool flag.
    """
    if waveform.shape[0] > max_length:
        if truncation == "rand_trunc":
            longer = True
            # random crop to max_length (for compatibility) -> this should be handled by self.pad
            overflow = len(waveform) - max_length
            idx = np.random.randint(0, overflow + 1)
            waveform = waveform[idx : idx + max_length]
            input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        elif truncation == "fusion":
            mel = self._np_extract_fbank_features(waveform, self.mel_filters)
            chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
            total_frames = mel.shape[0]
            if chunk_frames == total_frames:
                # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                # In this case, we just use the whole audio.
                input_mel = np.stack([mel, mel, mel, mel], axis=0)
                longer = False
            else:
                input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                longer = True
        else:
            raise NotImplementedError(f"data_truncating {truncation} not implemented")
    else:
        longer = False
        # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
        if waveform.shape[0] < max_length:
            if padding == "repeat":
                n_repeat = int(max_length / len(waveform))
                waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
            if padding == "repeatpad":
                n_repeat = int(max_length / len(waveform))
                waveform = np.stack(np.tile(waveform, n_repeat))
            waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
        if truncation == "fusion":
            input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
            input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
        else:
            input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
    return input_mel, longer
def __call__(
    self,
    raw_speech,
    truncation: str = None,
    padding=None,
    max_length=None,
    sampling_rate=None,
    return_tensors=None,
    **kwargs,
) -> BatchFeature:
    """Featurize one clip or a batch of clips into CLAP model inputs.

    Args:
        raw_speech: mono waveform(s) — a 1D/2D ndarray, a list of floats, or
            a list of such sequences.
        truncation / padding: override the defaults set in ``__init__``.
        max_length: target length in samples (defaults to ``nb_max_samples``).
        sampling_rate: rate of ``raw_speech``; validated against the extractor.
        return_tensors: optional framework to convert the batch to.

    Returns:
        BatchFeature with "input_features" and "is_longer".

    NOTE(review): locals restored from throwaway obfuscated names;
    ``np.floataa`` corrected to ``np.float64``.
    """
    truncation = truncation if truncation is not None else self.truncation
    padding = padding if padding else self.padding
    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                f" was sampled with {self.sampling_rate} and not {sampling_rate}."
            )
    else:
        logger.warning(
            "It is strongly recommended to pass the `sampling_rate` argument to this function. "
            "Failing to do so can result in silent errors that might be hard to debug."
        )
    is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
    if is_batched_numpy and len(raw_speech.shape) > 2:
        raise ValueError(f"Only mono-channel audio is supported for input to {self}")
    is_batched = is_batched_numpy or (
        isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
    )
    if is_batched:
        raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
    elif not is_batched and not isinstance(raw_speech, np.ndarray):
        raw_speech = np.asarray(raw_speech, dtype=np.float64)
    elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
        raw_speech = raw_speech.astype(np.float64)
    # always return batch
    if not is_batched:
        raw_speech = [np.asarray(raw_speech)]
    # convert to mel spectrogram, truncate and pad if needed.
    padded_inputs = [
        self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
        for waveform in raw_speech
    ]
    input_mel = []
    is_longer = []
    for mel, longer in padded_inputs:
        input_mel.append(mel)
        is_longer.append(longer)
    if truncation == "fusion" and sum(is_longer) == 0:
        # if no audio is longer than 10s, then randomly select one audio to be longer
        rand_idx = np.random.randint(0, len(input_mel))
        is_longer[rand_idx] = True
    if isinstance(input_mel[0], list):
        input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
    # is_longer is a list of bool
    is_longer = [[longer] for longer in is_longer]
    input_features = {"input_features": input_mel, "is_longer": is_longer}
    input_features = BatchFeature(input_features)
    if return_tensors is not None:
        input_features = input_features.convert_to_tensors(return_tensors)
    return input_features
| 4 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase__: Optional[Any] = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    """Compute the (height, width) an image should be resized to.

    Both output dimensions are snapped to a multiple of ``multiple``; when
    ``keep_aspect_ratio`` is set, the dimension that needs the *smaller*
    relative change drives the scale of both axes.

    NOTE(review): the original def repeated one obfuscated parameter name
    four times (a SyntaxError) while the body read the real identifiers;
    the function is renamed to match its only call site in the processor's
    ``resize`` method.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then clamp into [min_val, max_val]
        # by re-rounding toward the allowed range.
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class snake_case_ ( lowerCAmelCase ):
    """DPT-style image processor: resize / rescale / normalize images and
    post-process segmentation logits.

    NOTE(review): this block is machine-obfuscated. Every method parameter is
    named ``__lowerCAmelCase`` (duplicate parameter names are a SyntaxError),
    all five public methods share the single name ``__A`` (later defs shadow
    earlier ones), and many statements assign to ``SCREAMING_SNAKE_CASE_``
    while later lines read the original variable names (NameError at runtime).
    The original identifiers must be restored before this code can run; the
    comments below document the intended behavior only.
    """

    # Names of the tensors produced by `preprocess`.
    __lowerCamelCase : List[Any] = ['pixel_values']

    def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = PILImageResampling.BILINEAR , __lowerCAmelCase = False , __lowerCAmelCase = 1 , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        # Intended parameters (in order): do_resize, size, resample,
        # keep_aspect_ratio, ensure_multiple_of, do_rescale, rescale_factor,
        # do_normalize, image_mean, image_std — TODO restore names.
        super().__init__(**__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[Any] = size if size is not None else {'height': 384, 'width': 384}
        SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize
        SCREAMING_SNAKE_CASE_ : str = size
        SCREAMING_SNAKE_CASE_ : Any = keep_aspect_ratio
        SCREAMING_SNAKE_CASE_ : str = ensure_multiple_of
        SCREAMING_SNAKE_CASE_ : List[Any] = resample
        SCREAMING_SNAKE_CASE_ : Any = do_rescale
        SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_factor
        SCREAMING_SNAKE_CASE_ : str = do_normalize
        # Fall back to the standard ImageNet statistics when none are given.
        SCREAMING_SNAKE_CASE_ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        SCREAMING_SNAKE_CASE_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = 1 , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        """Resize one image to `size`, honoring keep_aspect_ratio / multiple."""
        SCREAMING_SNAKE_CASE_ : Any = get_size_dict(__lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
        # Target size is computed by the module-level helper, then applied.
        SCREAMING_SNAKE_CASE_ : str = get_resize_output_image_size(
            __lowerCAmelCase , output_size=(size['height'], size['width']) , keep_aspect_ratio=__lowerCAmelCase , multiple=__lowerCAmelCase , )
        return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )

    def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        """Rescale pixel values by a scalar factor (e.g. 1/255)."""
        return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )

    def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        """Normalize an image with per-channel mean and std."""
        return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )

    def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
        """Full preprocess pipeline: validate, resize, rescale, normalize,
        convert channel order, and pack into a BatchFeature."""
        # Per-call overrides fall back to the instance-level defaults.
        SCREAMING_SNAKE_CASE_ : List[Any] = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE_ : Optional[Any] = size if size is not None else self.size
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Tuple = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        SCREAMING_SNAKE_CASE_ : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        SCREAMING_SNAKE_CASE_ : Any = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE_ : List[str] = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE_ : Dict = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE_ : Optional[Any] = make_list_of_images(__lowerCAmelCase )
        if not valid_images(__lowerCAmelCase ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        # NOTE(review): precedence bug candidate — reads as
        # `(do_resize and size is None) or resample is None`; presumably
        # `do_resize and (size is None or resample is None)` was intended.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE_ : Dict = [to_numpy_array(__lowerCAmelCase ) for image in images]
        if do_resize:
            SCREAMING_SNAKE_CASE_ : List[Any] = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
        if do_rescale:
            SCREAMING_SNAKE_CASE_ : Tuple = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
        if do_normalize:
            SCREAMING_SNAKE_CASE_ : int = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )

    def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
        """Turn model output logits into per-image semantic segmentation maps,
        optionally resized to `target_sizes`."""
        SCREAMING_SNAKE_CASE_ : str = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(__lowerCAmelCase ):
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = target_sizes.numpy()
            SCREAMING_SNAKE_CASE_ : str = []
            for idx in range(len(__lowerCAmelCase ) ):
                # Upsample each logit map to its requested size, then argmax
                # over the class dimension.
                SCREAMING_SNAKE_CASE_ : Optional[int] = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=__lowerCAmelCase )
                SCREAMING_SNAKE_CASE_ : Dict = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(__lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE_ : List[str] = logits.argmax(dim=1 )
            SCREAMING_SNAKE_CASE_ : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 345 |
import math
def __SCREAMING_SNAKE_CASE(number) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Fixes in this revision:
    - ``isinstance(number, number)`` -> ``isinstance(number, int)`` (the
      original always raised ``isinstance() arg 2 must be a type``);
    - the error *message* is now raised instead of the raw input;
    - the two loop bounds had been collapsed onto the parameter name —
      restored to ``block_index`` and ``increment``.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` < 1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers are generated block by block; each block doubles the
        # power of two added and doubles the number of entries it contributes.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Print the first Proth numbers; index 0 deliberately exercises the
    # ValueError branch. Fixed: the original called the undefined name
    # `proth` and printed the undefined name `value`.
    for number in range(11):
        value = 0
        try:
            value = __SCREAMING_SNAKE_CASE(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
| 345 | 1 |
from __future__ import annotations
def A_(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; return the set of vertices reachable
    from ``start``.

    Fixed: the original declared both parameters with the same name (a
    SyntaxError) and read the undefined locals ``stack``/``start``.

    Args:
        graph: adjacency mapping vertex -> list of neighbor vertices.
        start: vertex to begin the traversal from.
    """
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
# Sample graph used by the demo below. Fixed: the dict was bound to
# `lowercase` but read as the undefined name `G`, and the demo called the
# undefined name `depth_first_search` instead of `A_`.
lowercase: dict[str, list[str]] = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(A_(lowercase, """A"""))
| 392 |
import qiskit
def A_(qubits, classical_bits) -> qiskit.result.counts.Counts:
    """Run a single-qubit measurement on the Aer simulator and return the
    measurement histogram.

    Fixed: the original declared both parameters with the same name (a
    SyntaxError) and read the undefined locals ``circuit``/``job``.

    Args:
        qubits: number of quantum bits in the circuit.
        classical_bits: number of classical bits to measure into.
    """
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    # Fixed: the original called the undefined name `single_qubit_measure`;
    # the function in this file is named `A_`.
    print(F"""Total count for various states are: {A_(1, 1)}""")
| 392 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__:
    """Builds tiny RegNet configs and dummy inputs for the TF model tests.

    NOTE(review): machine-obfuscated — every ``__init__`` parameter shares the
    name ``__UpperCAmelCase`` (duplicate parameters are a SyntaxError), all
    methods share the name ``__magic_name__`` (later defs shadow earlier ones),
    and statements assign to ``__lowercase`` while later lines read the
    original variable names. Identifiers must be restored before running.
    """

    def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=3_2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_0 , __UpperCAmelCase=[1_0, 2_0, 3_0, 4_0] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ):
        # Intended params: parent, batch_size, image_size, num_channels,
        # embeddings_size, hidden_sizes, depths, is_training, use_labels,
        # hidden_act, num_labels, scope. NOTE: list defaults are mutable.
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = num_channels
        __lowercase = embeddings_size
        __lowercase = hidden_sizes
        __lowercase = depths
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = hidden_act
        __lowercase = num_labels
        __lowercase = scope
        # num_stages is derived from the number of depth entries.
        __lowercase = len(__UpperCAmelCase )

    def __magic_name__ ( self ):
        """Build random pixel values (and labels when use_labels) plus a config."""
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] , self.num_labels )
        __lowercase = self.get_config()
        return config, pixel_values, labels

    def __magic_name__ ( self ):
        """Create a small RegNetConfig from the tester's hyperparameters."""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        """Run the base model and check the last hidden state shape (B, C, H/32, W/32)."""
        __lowercase = TFRegNetModel(config=__UpperCAmelCase )
        __lowercase = model(__UpperCAmelCase , training=__UpperCAmelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        """Run the classification head and check the logits shape (B, num_labels)."""
        __lowercase = self.num_labels
        __lowercase = TFRegNetForImageClassification(__UpperCAmelCase )
        __lowercase = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __magic_name__ ( self ):
        """Return (config, inputs_dict) for the common test mixin."""
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class lowerCamelCase__( snake_case_ , unittest.TestCase ):
    """Model-level tests for the TF RegNet implementations.

    NOTE(review): machine-obfuscated — every test method shares the name
    ``__magic_name__`` (later defs shadow earlier ones) and ``__lowercase``
    assignments discard the real variable names that later lines read.
    """

    # Classes under test and the pipeline mapping.
    UpperCamelCase : Optional[Any] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    UpperCamelCase : Dict = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # RegNet has no text inputs / embeddings, so these common checks are off.
    UpperCamelCase : Any = False
    UpperCamelCase : Optional[int] = False
    UpperCamelCase : Tuple = False
    UpperCamelCase : Dict = False
    UpperCamelCase : List[str] = False

    def __magic_name__ ( self ):
        """Wire up the model tester and a text-free config tester."""
        __lowercase = TFRegNetModelTester(self )
        __lowercase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )

    def __magic_name__ ( self ):
        """Config creation/serialization is exercised via the ConfigTester."""
        return

    @unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def __magic_name__ ( self ):
        """Skipped: vision model, no inputs_embeds."""
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
    @slow
    def __magic_name__ ( self ):
        """keras fit only runs on GPU (grouped convolutions)."""
        super().test_keras_fit()

    @unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def __magic_name__ ( self ):
        """Skipped: no input/output embedding layers."""
        pass

    def __magic_name__ ( self ):
        """The forward signature should start with `pixel_values`."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(__UpperCAmelCase )
            __lowercase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __UpperCAmelCase )

    def __magic_name__ ( self ):
        """Shape check of the base model output."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )

    def __magic_name__ ( self ):
        """Hidden-states output: one tensor per stage (+ embeddings), with
        spatial dims halved at the stem, for both block layer types."""
        def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
            __lowercase = model_class(__UpperCAmelCase )
            __lowercase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase )
            __lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __lowercase = self.model_tester.num_stages
            self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )

        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                __lowercase = layer_type
                __lowercase = True
                check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                __lowercase = True
                check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    def __magic_name__ ( self ):
        """Tuple and dict model outputs must match element-wise, with and
        without labels / output_hidden_states."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase={} ):
            # NOTE(review): mutable default `{}` — safe only while callers
            # never mutate it; restore with care.
            __lowercase = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase )
            __lowercase = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase ).to_tuple()

            def recursive_check(__UpperCAmelCase , __UpperCAmelCase ):
                if isinstance(__UpperCAmelCase , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(__UpperCAmelCase , __UpperCAmelCase ):
                        recursive_check(__UpperCAmelCase , __UpperCAmelCase )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(__UpperCAmelCase , __UpperCAmelCase ) ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
                        ) , )

            recursive_check(__UpperCAmelCase , __UpperCAmelCase )

        for model_class in self.all_model_classes:
            __lowercase = model_class(__UpperCAmelCase )
            __lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            __lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            __lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            __lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            __lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            __lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"""output_hidden_states""": True} )
            __lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            __lowercase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"""output_hidden_states""": True} )

    def __magic_name__ ( self ):
        """Shape check of the image-classification head."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )

    @slow
    def __magic_name__ ( self ):
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = TFRegNetModel.from_pretrained(__UpperCAmelCase )
            self.assertIsNotNone(__UpperCAmelCase )
def lowercase__():
    """Load the COCO cats fixture image used by the integration test below.

    Fixed: the original bound the image to a throwaway local and returned
    the undefined name ``image``.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class lowerCamelCase__( unittest.TestCase ):
    """Slow integration test: run a pretrained TF RegNet classifier on the
    COCO fixture image and compare the first logits to reference values.

    NOTE(review): obfuscated — ``__lowercase`` assignments discard the real
    variable names (``model``, ``image_processor``, ``outputs``) that later
    lines read; restore before running.
    """

    @cached_property
    def __magic_name__ ( self ):
        """Image processor matching the checkpoint (None without vision deps)."""
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def __magic_name__ ( self ):
        """End-to-end: preprocess fixture image, forward, check logits."""
        __lowercase = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=__UpperCAmelCase , return_tensors="""tf""" )
        # forward pass
        __lowercase = model(**__UpperCAmelCase , training=__UpperCAmelCase )
        # verify the logits
        __lowercase = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
        # Reference slice from a known-good run of the checkpoint.
        __lowercase = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
        tf.debugging.assert_near(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 )
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : List[Any] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( snake_case_ , unittest.TestCase ):
UpperCamelCase : List[str] = DebertaVaTokenizer
UpperCamelCase : Optional[int] = DebertaVaTokenizerFast
UpperCamelCase : Tuple = True
UpperCamelCase : Dict = True
def __magic_name__ ( self ):
    """setUp: build a DebertaV2 tokenizer from the SentencePiece fixture and
    save it to the temp dir used by the common tests.

    NOTE(review): obfuscated — the tokenizer is bound to ``__lowercase`` but
    saved via the undefined name ``tokenizer``; restore before running."""
    super().setUp()
    # We have a SentencePiece fixture for testing
    __lowercase = DebertaVaTokenizer(__UpperCAmelCase , unk_token="""<unk>""" )
    tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self , __UpperCAmelCase ):
    """Return the (input_text, output_text) pair used by the common tokenizer
    round-trip tests. NOTE(review): both strings are bound to ``__lowercase``
    while the return reads ``input_text``/``output_text``."""
    __lowercase = """this is a test"""
    __lowercase = """this is a test"""
    return input_text, output_text
def __magic_name__ ( self ):
    """Check that the <pad> token converts to id 0 and back."""
    __lowercase = """<pad>"""
    __lowercase = 0
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( self ):
    """Spot-check the first/last vocab entries and the total vocab length."""
    __lowercase = list(self.get_tokenizer().get_vocab().keys() )
    self.assertEqual(vocab_keys[0] , """<pad>""" )
    self.assertEqual(vocab_keys[1] , """<unk>""" )
    self.assertEqual(vocab_keys[-1] , """[PAD]""" )
    self.assertEqual(len(__UpperCAmelCase ) , 3_0_0_0_1 )
def __magic_name__ ( self ):
    """The fixture SentencePiece model has 30000 pieces."""
    self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __magic_name__ ( self ):
    """do_lower_case: mixed-case, whitespace-heavy input should tokenize to
    lowercased pieces identically for slow and fast tokenizers."""
    __lowercase = """ \tHeLLo!how \n Are yoU? """
    __lowercase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
    # fmt: on
    __lowercase = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase )
    __lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
    self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    __lowercase = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase )
    __lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
    self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __magic_name__ ( self ):
    """Intentionally skipped (slow/fast mismatch)."""
    pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __magic_name__ ( self ):
    """Intentionally skipped (slow/fast mismatch)."""
    pass
def __magic_name__ ( self ):
    """split_by_punct: punctuation becomes standalone pieces; slow and fast
    tokenizers must agree."""
    __lowercase = """I was born in 92000, and this is falsé."""
    __lowercase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
    # fmt: on
    __lowercase = DebertaVaTokenizer(__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
    __lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
    self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    __lowercase = DebertaVaTokenizerFast(__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
    __lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
    self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
    """do_lower_case + split_by_punct combined; slow and fast must agree."""
    __lowercase = """I was born in 92000, and this is falsé."""
    __lowercase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
    # fmt: on
    __lowercase = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
    __lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
    self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    __lowercase = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
    __lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
    self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowercase = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
__lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
__lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = """I was born in 92000, and this is falsé."""
__lowercase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowercase = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
__lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
__lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = """ \tHeLLo!how \n Are yoU? """
__lowercase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowercase = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
__lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
__lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    def __magic_name__ ( self ):
        """Slow and fast tokenizers from the test fixtures must produce the same tokens and ids.

        NOTE(review): the variables bound via ``__lowercase`` and the
        ``__UpperCAmelCase`` references are machine-mangled; confirm against
        the upstream test before executing.
        """
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_rust_tokenizer()
        __lowercase = """I was born in 92000, and this is falsé."""
        __lowercase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
        __lowercase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
        __lowercase = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = self.get_rust_tokenizer()
        __lowercase = tokenizer.encode(__UpperCAmelCase )
        __lowercase = rust_tokenizer.encode(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    def __magic_name__ ( self ):
        """End-to-end check of encode/tokenize/convert_ids_to_tokens against hard-coded ids
        and token sequences, for both the slow and the fast tokenizer.

        NOTE(review): locals are machine-mangled (``__lowercase`` re-bound;
        ``tokenizer``/``rust_tokenizer``/``__UpperCAmelCase`` unresolved in
        this view) -- verify against the original test file.
        """
        __lowercase = """This is a test"""
        __lowercase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
        __lowercase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
        __lowercase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
        __lowercase = DebertaVaTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
        __lowercase = DebertaVaTokenizerFast(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
        __lowercase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = tokenizer.tokenize(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = rust_tokenizer.tokenize(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = rust_tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        # fmt: off
        __lowercase = """I was born in 92000, and this is falsé."""
        __lowercase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
        __lowercase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
        __lowercase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
        # fmt: on
        __lowercase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = tokenizer.tokenize(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = rust_tokenizer.tokenize(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        __lowercase = rust_tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    def __magic_name__ ( self ):
        """build_inputs_with_special_tokens must wrap one sequence as [CLS] x [SEP]
        and a pair as [CLS] x [SEP] y [SEP].

        NOTE(review): ``text``/``text_a`` and the ``__UpperCAmelCase`` references
        are unresolved mangled names in this view.
        """
        __lowercase = DebertaVaTokenizer(__UpperCAmelCase )
        __lowercase = tokenizer.encode("""sequence builders""" )
        __lowercase = tokenizer.encode("""multi-sequence build""" )
        __lowercase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
        __lowercase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __UpperCAmelCase )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __UpperCAmelCase , )
    @slow
    def __magic_name__ ( self ):
        """Integration test: full expected encoding (ids, token types, attention mask)
        for a pinned checkpoint/revision of microsoft/deberta-v2-xlarge."""
        # fmt: off
        __lowercase = {"""input_ids""": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 566 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
snake_case_ = get_tests_dir('fixtures')
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Offline/robustness tests for feature-extractor loading.

    NOTE(review): ``a__`` is an unresolved mangled name in this view
    (presumably the mocked response object) -- confirm against the original.
    """
    def a (self : Optional[int] ):
        """Cached model must still load when the Hub returns HTTP 500."""
        __snake_case = mock.Mock()
        __snake_case = 500
        __snake_case = {}
        __snake_case = HTTPError
        __snake_case = {}
        # Download this model to make sure it's in the cache.
        __snake_case = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=a__ ) as mock_head:
            __snake_case = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def a (self : Optional[int] ):
        """Loading directly from a resolved config URL must work."""
        __snake_case = WavaVecaFeatureExtractor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Staging-Hub round-trip tests: push a feature extractor, reload it, and
    compare attributes.  Talks to the real (staging) Hub -- kept byte-identical;
    exact call order matters.

    NOTE(review): ``a__`` is an unresolved mangled name here (presumably the
    local fixtures directory / token) -- confirm against the original file.
    """
    @classmethod
    def a (cls : Tuple ):
        """Authenticate once for the whole class."""
        __snake_case = TOKEN
        HfFolder.save_token(a__ )
    @classmethod
    def a (cls : str ):
        """Best-effort cleanup of every repo the tests may have created."""
        try:
            delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
        except HTTPError:
            pass
    def a (self : int ):
        """Push to a user namespace via push_to_hub and via save_pretrained(push_to_hub=True)."""
        __snake_case = WavaVecaFeatureExtractor.from_pretrained(a__ )
        feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
        __snake_case = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(a__ , getattr(a__ , a__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                a__ , repo_id='''test-feature-extractor''' , push_to_hub=a__ , use_auth_token=self._token )
        __snake_case = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(a__ , getattr(a__ , a__ ) )
    def a (self : Optional[int] ):
        """Same round-trip but into an organization namespace."""
        __snake_case = WavaVecaFeatureExtractor.from_pretrained(a__ )
        feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
        __snake_case = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(a__ , getattr(a__ , a__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                a__ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=a__ , use_auth_token=self._token )
        __snake_case = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(a__ , getattr(a__ , a__ ) )
    def a (self : List[Any] ):
        """Push a custom (dynamic) feature extractor and reload it with trust_remote_code."""
        CustomFeatureExtractor.register_for_auto_class()
        __snake_case = CustomFeatureExtractor.from_pretrained(a__ )
        feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
        __snake_case = AutoFeatureExtractor.from_pretrained(
            f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=a__ )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 388 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Union[str, Any] ) -> str:
    """Assert that gradients of two models are (or are not) in sync.

    NOTE(review): the signature is machine-mangled -- ``snake_case_`` is
    repeated (a SyntaxError as written) and the body reads ``model_a``,
    ``model_b``, ``did_step`` and ``iteration`` which are not bound here.
    Restore the original parameter names before running this module.
    """
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Any=True ) -> Dict:
    """Run one forward pass + MSE loss + backward, either manually (scaling the
    loss by the accumulation steps) or through ``accelerator.backward``.

    NOTE(review): signature is machine-mangled (duplicate ``snake_case_``
    parameters) and the body reads ``model``/``target``/``output``/``loss``/
    ``do_backward``/``accelerator`` which are not bound here.
    """
    model.train()
    __snake_case = model(snake_case_ )
    __snake_case = F.mse_loss(snake_case_ , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(snake_case_ )
def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int]=False ) -> Optional[int]:
    """Build a seeded RegressionModel + DataLoader (optionally with AdamW and a
    LambdaLR schedule) and prepare them with the given Accelerator.

    NOTE(review): parameter names are machine-mangled; the body reads
    ``accelerator``/``sched`` which are not bound here -- restore the original
    names before running.
    """
    set_seed(42 )
    __snake_case = RegressionModel()
    __snake_case = deepcopy(snake_case_ )
    __snake_case = RegressionDataset(length=80 )
    __snake_case = DataLoader(snake_case_ , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        __snake_case = AdamW(params=model.parameters() , lr=1e-3 )
        __snake_case = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        __snake_case = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
        __snake_case = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
    else:
        __snake_case , __snake_case = accelerator.prepare(snake_case_ , snake_case_ )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def lowerCamelCase__ ( snake_case_ : Any ) -> str:
    """On a single CPU/GPU, ``accelerator.no_sync`` must be a no-op: gradients
    of the wrapped and unwrapped model stay in sync every iteration.

    NOTE(review): mangled locals (``__snake_case`` re-bound; ``ddp_input``/
    ``ddp_target``/``input``/``target``/``model``/``ddp_model`` unresolved) --
    restore original names before running.
    """
    # Test when on a single CPU or GPU that the context manager does nothing
    __snake_case , __snake_case , __snake_case = get_training_setup(snake_case_ )
    # Use a single batch
    __snake_case , __snake_case = next(iter(snake_case_ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        __snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
        __snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(snake_case_ ):
                step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        else:
            # Sync grads
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        __snake_case = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCamelCase__ ( snake_case_ : int ) -> Tuple:
    """In a distributed setup, ``no_sync`` must delay gradient sync: grads
    diverge on even iterations (inside no_sync) and match on odd ones.

    NOTE(review): mangled locals/unresolved names as in the functions above --
    restore original names before running.
    """
    # Test on distributed setup that context manager behaves properly
    __snake_case , __snake_case , __snake_case = get_training_setup(snake_case_ )
    # Use a single batch
    __snake_case , __snake_case = next(iter(snake_case_ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        __snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
        __snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(snake_case_ ):
                step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        else:
            # Sync grads
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        __snake_case = ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCamelCase__ ( snake_case_ : int=False , snake_case_ : List[str]=False ) -> Optional[Any]:
    """``accelerator.accumulate`` with gradient_accumulation_steps=2: grads
    match only on step boundaries (every 2nd batch or the last batch).

    NOTE(review): mangled locals/unresolved names -- restore original names
    before running.
    """
    __snake_case = Accelerator(
        split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    __snake_case , __snake_case , __snake_case = get_training_setup(snake_case_ )
    for iteration, batch in enumerate(snake_case_ ):
        __snake_case , __snake_case = batch.values()
        # Gather the distributed inputs and targs for the base model
        __snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
        __snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(snake_case_ ):
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        __snake_case = ddp_input[torch.randperm(len(snake_case_ ) )]
    GradientState._reset_state()
def lowerCamelCase__ ( snake_case_ : Dict=False , snake_case_ : Optional[int]=False ) -> Any:
    """Gradient accumulation together with an optimizer and LR scheduler: the
    manually-stepped and accelerator-wrapped learning rates must stay equal.

    NOTE(review): mangled locals/unresolved names -- restore original names
    before running.
    """
    __snake_case = Accelerator(
        split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = get_training_setup(snake_case_ , snake_case_ )
    for iteration, batch in enumerate(snake_case_ ):
        __snake_case , __snake_case = batch.values()
        # Gather the distributed inputs and targs for the base model
        __snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
        __snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(snake_case_ ):
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
        __snake_case = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
        if accelerator.num_processes > 1:
            check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def lowerCamelCase__ ( ) -> Any:
    """GradientState must track the active DataLoader correctly, including a
    nested (second) dataloader and the end-of-dataloader flag.

    NOTE(review): mangled locals; the iterated loaders come from the
    re-bound ``__snake_case`` -- restore original names before running.
    """
    __snake_case = Accelerator()
    __snake_case = RegressionDataset(length=80 )
    __snake_case = DataLoader(snake_case_ , batch_size=16 )
    __snake_case = RegressionDataset(length=96 )
    __snake_case = DataLoader(snake_case_ , batch_size=16 )
    __snake_case , __snake_case = accelerator.prepare(snake_case_ , snake_case_ )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(snake_case_ ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
        if iteration < len(snake_case_ ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(snake_case_ ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
                    if batch_num < len(snake_case_ ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def lowerCamelCase__ ( ) -> List[str]:
    """Entry point: dispatch the sync/accumulation tests appropriate for the
    current distributed configuration, printing progress on rank 0.

    NOTE(review): calls ``test_dataloader_break``/``test_noop_sync``/... which
    are the original (pre-mangling) names of the functions above -- restore
    those names for this module to run.
    """
    __snake_case = Accelerator()
    __snake_case = accelerator.state
    if state.local_process_index == 0:
        print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('''**Test NOOP `no_sync` context manager**''' )
        test_noop_sync(snake_case_ )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('''**Test Distributed `no_sync` context manager**''' )
        test_distributed_sync(snake_case_ )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '''**Test `accumulate` gradient accumulation, ''' , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation(snake_case_ , snake_case_ )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ )
def lowerCamelCase__ ( snake_case_ : str ) -> str:
    """TPU entry point for xla_spawn; the index argument is unused."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 388 | 1 |
from __future__ import annotations
def lowerCamelCase__ ( lowercase ):
    """Return a peak element of ``lowercase`` by divide and conquer.

    A peak is an element strictly greater than both neighbours.  The input
    must contain an interior peak (e.g. it rises and then falls); strictly
    monotonic inputs are not supported by this algorithm.

    Fixes: the original body read an undefined name ``a_`` instead of the
    parameter, and recursed through an undefined name ``peak``.

    :param lowercase: sequence of comparable values with an interior peak
    :return: the value of a peak element
    """
    middle = len(lowercase ) // 2
    # choose the middle 3 elements
    window = lowercase[middle - 1 : middle + 2]
    # if the middle element is a peak, we are done
    if window[1] > window[0] and window[1] > window[2]:
        return window[1]
    # if increasing, the peak is to the right: recurse on the right half
    elif window[0] < window[2]:
        if len(lowercase[:middle] ) == 2:
            # keep at least 3 elements in the recursive slice
            middle -= 1
        return lowerCamelCase__(lowercase[middle:] )
    # otherwise decreasing: the peak is to the left
    else:
        if len(lowercase[:middle] ) == 2:
            middle += 1
        return lowerCamelCase__(lowercase[:middle] )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 62 | '''simple docstring'''
import math
def __UpperCAmelCase ( a_: int ):
    """Return all prime numbers strictly smaller than ``a_``.

    Sieve of Eratosthenes that marks only multiples of odd primes; even
    numbers are never collected (2 is added explicitly).

    Fixes: the original body read an undefined name ``n`` instead of the
    parameter ``a_`` (NameError), collected from mangled locals, and crashed
    with IndexError for ``a_ < 3``.

    :param a_: exclusive upper bound
    :return: list of primes < a_, in increasing order
    """
    if a_ < 3:
        # no prime is smaller than 2, and 2 < a_ is false here
        return []
    is_prime = [True] * a_
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(a_**0.5 + 1 ), 2 ):
        # start marking at 2*i and step by i; even composites are harmless
        # because only odd indices are collected below
        index = i * 2
        while index < a_:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, a_, 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def __UpperCAmelCase ( a_: int = 999_966_663_333 ):
    """Project-Euler-style solution: sum numbers divisible by consecutive prime
    pairs (lps/ups) up to the limit, correcting double counting.

    NOTE(review): calls ``prime_sieve`` -- the original (pre-mangling) name of
    the sieve defined above, which in this chunk is also named
    ``__UpperCAmelCase`` and therefore shadowed; restore the original
    function names before running.  Locals below are machine-mangled
    (``_UpperCAmelCase`` re-bound) and are read back under their original
    names (``primes``, ``last_prime``, ...).
    """
    _UpperCAmelCase : Tuple = math.floor(math.sqrt(a_ ) ) + 100
    _UpperCAmelCase : Union[str, Any] = prime_sieve(a_ )
    _UpperCAmelCase : Optional[int] = 0
    _UpperCAmelCase : Tuple = 0
    _UpperCAmelCase : int = primes[prime_index]
    while (last_prime**2) <= limit:
        _UpperCAmelCase : List[str] = primes[prime_index + 1]
        _UpperCAmelCase : Any = last_prime**2
        _UpperCAmelCase : Optional[Any] = next_prime**2
        # Get numbers divisible by lps(current)
        _UpperCAmelCase : List[str] = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        _UpperCAmelCase : Dict = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        _UpperCAmelCase : Dict = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        _UpperCAmelCase : Union[str, Any] = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution()) | 494 | 0 |
from ...processing_utils import ProcessorMixin
class lowercase ( lowercase_ ):
    """Processor bundling a TVLT image processor and audio feature extractor.

    NOTE(review): method signatures are machine-mangled -- ``snake_case`` is
    repeated as a parameter name (a SyntaxError as written) and the bodies
    read ``images``/``audio``/``sampling_rate`` etc. which are not bound.
    Restore the original parameter names before running.
    """
    __SCREAMING_SNAKE_CASE : Optional[Any] = ['''image_processor''', '''feature_extractor''']
    __SCREAMING_SNAKE_CASE : Union[str, Any] = '''TvltImageProcessor'''
    __SCREAMING_SNAKE_CASE : Optional[Any] = '''TvltFeatureExtractor'''
    def __init__( self , snake_case , snake_case ):
        """Store the two sub-processors on the instance."""
        super().__init__(image_processor=snake_case , feature_extractor=snake_case )
        snake_case_ = image_processor
        snake_case_ = feature_extractor
    def __call__( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , snake_case=False , *snake_case , **snake_case , ):
        """Forward images to the image processor and audio to the feature
        extractor, merging their outputs into one dict."""
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.' )
        snake_case_ = None
        if images is not None:
            snake_case_ = self.image_processor(snake_case , mask_pixel=snake_case , *snake_case , **snake_case )
        if images_mixed is not None:
            snake_case_ = self.image_processor(snake_case , is_mixed=snake_case , *snake_case , **snake_case )
        if audio is not None:
            snake_case_ = self.feature_extractor(
                snake_case , *snake_case , sampling_rate=snake_case , mask_audio=snake_case , **snake_case )
        snake_case_ = {}
        if audio is not None:
            output_dict.update(snake_case )
        if images is not None:
            output_dict.update(snake_case )
        if images_mixed_dict is not None:
            output_dict.update(snake_case )
        return output_dict
    @property
    def a ( self ):
        """Union of both sub-processors' model input names, de-duplicated."""
        snake_case_ = self.image_processor.model_input_names
        snake_case_ = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 108 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( lowercase_ , unittest.TestCase ):
    """PhoBERT tokenizer tests: build a tiny vocab/merges fixture and check
    tokenization plus id conversion.

    NOTE(review): several locals are machine-mangled (``snake_case_``
    re-bound; ``vocab_tokens``/``merges``/``tokens``/``input_tokens`` are
    read but never bound under those names here) -- restore the original
    names before running.
    """
    __SCREAMING_SNAKE_CASE : int = PhobertTokenizer
    __SCREAMING_SNAKE_CASE : int = False
    def a ( self ):
        """Write a minimal vocab file and BPE merges file into tmpdirname."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
        snake_case_ = dict(zip(snake_case , range(len(snake_case ) ) ) )
        snake_case_ = ['#version: 0.2', 'l à</w>']
        snake_case_ = {'unk_token': '<unk>'}
        snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            for token in vocab_tokens:
                fp.write(F'''{token} {vocab_tokens[token]}\n''' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(snake_case ) )
    def a ( self , **snake_case ):
        """Instantiate a PhobertTokenizer from the fixture directory."""
        kwargs.update(self.special_tokens_map )
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **snake_case )
    def a ( self , snake_case ):
        """Return an (input, expected-output) pair for the common tests."""
        snake_case_ = 'Tôi là VinAI Research'
        snake_case_ = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
        return input_text, output_text
    def a ( self ):
        """BPE tokenization of a Vietnamese sentence and its id conversion."""
        snake_case_ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case_ = 'Tôi là VinAI Research'
        snake_case_ = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
        snake_case_ = tokenizer.tokenize(snake_case )
        print(snake_case )
        self.assertListEqual(snake_case , snake_case )
        snake_case_ = tokens + [tokenizer.unk_token]
        snake_case_ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
| 108 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both objects were bound to the same name `lowercase_`, so the logger was
# immediately overwritten and the class below referenced an undefined `logger`.
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config URL.
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    """Configuration for XLNet models.

    Stores architecture hyper-parameters plus the memory (`mems`) and sequence
    summary settings. Fixes over the obfuscated original: every ``__init__``
    parameter was named ``SCREAMING_SNAKE_CASE_`` (duplicate arguments — a
    SyntaxError), the attribute assignments lost their ``self.`` prefix, the
    deprecation warning and ``super().__init__`` referenced the undefined name
    ``snake_case__``, and the ``max_position_embeddings`` property/setter pair
    was renamed inconsistently (the ``@max_position_embeddings.setter``
    decorator referenced a name that no longer existed).
    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Build the config; raises ValueError when d_model is not divisible by n_head."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.',
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # XLNet has no fixed sequence-length limit; -1 signals "unlimited".
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 562 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
# Fix: all four module constants were bound to the single shadowed name
# `lowerCAmelCase`, while the tokenizer class below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) byte-level BPE tokenizer for CodeGen.

    Mirrors the slow ``CodeGenTokenizer`` and adds a ``truncate_before_pattern``
    option to ``decode`` for trimming generated code. Fixes over the obfuscated
    original: ``*snake_case__, **snake_case__`` duplicate parameter names
    (SyntaxError), ``model_id`` assigned under another name but referenced in the
    error message (NameError), every method named ``a`` (shadowing), and an
    undefined base-class name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        # Re-create the pre-tokenizer if its stored add_prefix_space disagrees
        # with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Guard: pretokenized inputs require add_prefix_space=True."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Guard: pretokenized inputs require add_prefix_space=True."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Decode ids to text, optionally truncating at the first regex match."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Trim a generated completion at the second print/def or the earliest
        match of any pattern in `truncate_before_pattern`."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 444 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Fix: the logger was bound to a mangled name while the config class below
# calls `logger.info(...)` (NameError).
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config URL.
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for the Conditional DETR object-detection model.

    Fixes over the obfuscated original: every ``__init__`` parameter was named
    ``_UpperCAmelCase`` (duplicate arguments — a SyntaxError), attribute
    assignments lost their ``self.`` prefix, ``isinstance`` was called with the
    same name twice instead of ``(backbone_config, dict)``, and the three
    methods were all named ``_lowercase`` (each shadowing the previous one).
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rebuild the nested backbone config object from its dict form.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR.

    Fixes: the obfuscated class name collided with the config class above, the
    base class name was undefined, and all three properties shared one mangled
    name (shadowing each other).
    """

    # Minimum torch version supporting the ops this export needs.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph's inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 581 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check Project Euler #43's substring-divisibility property.

    `num` is a digit tuple; each 3-digit window d2..d4, d3..d5, ... must be
    divisible by 2, 3, 5, 7, 11, 13, 17 respectively.
    """
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0..n-1 pandigital numbers with the substring-divisibility property.

    Fix: both functions were named ``a`` in the obfuscated original, so the call
    to ``is_substring_divisible`` (and ``solution`` in the guard) raised NameError.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 581 | 1 |
"""simple docstring"""
from torch import nn
def _snake_case ( snake_case__ : Union[str, Any] ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'Unsupported activation function: {act_fn}' ) | 91 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
_lowercase = float('''nan''')
class Tee:
    """Tee print output into a log file while still writing to the console.

    Usage: ``sys.stdout = Tee(filename)``. Fix: the class and its ``write``
    method had mangled names — ``main`` assigns ``Tee(...)`` to ``sys.stdout``,
    which requires a real ``write`` method and the ``Tee`` name.
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        # Delegate everything else (flush, isatty, ...) to the real stdout.
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes so the log file stays clean
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line, shell-quoted and wrapped for replay.

    Fix: the obfuscated signature used the same name for both parameters
    (SyntaxError) and the function name did not match its call site.

    Args:
        max_width: width to wrap the command at (shell line-continuations).
        full_python_path: use the full interpreter path instead of its basename.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    """Normalize ``args.base_cmd`` and return it as an argv list.

    Unwraps multi-line input, replaces any ``--output_dir`` with `output_dir`,
    forces ``--overwrite_output_dir``, and prefixes the current interpreter.
    Fixes: duplicate parameter names (SyntaxError) and the first two
    ``re.sub`` results being discarded into locals instead of written back to
    ``args.base_cmd``.
    """
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run `cmd` once, save its stdout/stderr logs, and return selected metrics.

    On a non-zero exit code returns ``{target_metric_key: nan}``; otherwise
    loads ``{output_dir}/all_results.json`` and keeps only `metric_keys`.
    Fix: the obfuscated signature reused one parameter name seven times
    (SyntaxError) and the function name did not match its caller.
    """
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation `repeat_times` times and return its averaged metrics.

    Prints a one-line summary (✓ per successful repeat, ✘ per failed one).
    When every repeat fails, returns the variation with a `nan` target metric.
    Fix: the obfuscated signature reused one parameter name ten times
    (SyntaxError) and the call to ``process_run_single`` used a mangled name.
    """
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        # average over the successful repeats
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    """Return a software/hardware report string (requires a CUDA device)."""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Build and print the benchmark comparison tables (github + console).

    A `diff_%` column is computed against `base_variation`'s target metric when
    available, otherwise against the minimum observed value. Fix: the
    obfuscated signature reused one parameter name five times (SyntaxError) and
    the intermediate DataFrames/columns were all assigned to one local name.
    """
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    """Parse CLI args, run every requested variation, and print the report.

    Fixes: function renamed so the ``main()`` guard resolves, intermediate
    values given real names, and the lost ``sys.stdout = Tee(report_fn)``
    assignment restored (the obfuscated code built the Tee but discarded it,
    so nothing was ever logged).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]

    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
# Fix: both constants were bound to the single name `A`, while
# rename_state_dict_key iterates `PATTERNS` (NameError).
logger = logging.get_logger(__name__)

# (ParlAI name fragment, HF name fragment) substitutions applied in order.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Translate a ParlAI state-dict key into the matching HF Blenderbot key.

    Fix: renamed from the shadowed obfuscated name so the call in
    ``convert_parlai_checkpoint`` resolves; the substitution result is written
    back to ``k`` instead of a throwaway local.
    """
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    """In place, rename Blenderbot-3B `layernorm_embedding` keys to `layer_norm`.

    Raises AssertionError if a target key already exists in `sd`.
    """
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
A : Tuple = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint into an HF model and save it.

    Fix: renamed from the shadowed obfuscated name so the CLI guard's call
    resolves; intermediate values given real names.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    # Fix: parse_args() result was bound to `A` while the call below reads `args`.
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 714 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two equal-length implicant strings if they differ in at most one bit.

    Returns the merged string with '_' at the differing position, or False when
    they differ in more than one position. Fix: renamed from the shadowed
    obfuscated name so the call in ``check`` resolves, and the '_' is written
    into the list copy instead of a throwaway local.
    """
    lista = list(string1)
    listb = list(string2)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(lista)
def _a(binary):
    """
    Compute the prime implicants of a list of binary minterm strings.

    Repeatedly merges every pair of terms that differ in at most one
    position (replacing the differing bit with ``_``); a term that never
    merges in a round is a prime implicant.

    Bug fixes: the original lost the ``check1[i]/[j] = "*"`` marks to
    collapsed assignments and recorded the literal string "X" instead of
    the merged implicant.  The pairwise merge is inlined as a private
    helper because the module-level helper's name is mangled in this file.
    """

    def _merge(term_a, term_b):
        # Merge two terms differing in <= 1 position; False otherwise.
        merged = list(term_a)
        diff_count = 0
        for position in range(len(merged)):
            if merged[position] != term_b[position]:
                diff_count += 1
                merged[position] = "_"
        if diff_count > 1:
            return False
        return "".join(merged)

    prime_implicants = []
    terms = list(binary)
    while True:
        merged_flags = ["$"] * len(terms)
        next_round = []
        for i in range(len(terms)):
            for j in range(i + 1, len(terms)):
                combined = _merge(terms[i], terms[j])
                if combined is not False:
                    # Both terms were absorbed into the combined implicant.
                    merged_flags[i] = "*"
                    merged_flags[j] = "*"
                    next_round.append(combined)
        for i in range(len(terms)):
            if merged_flags[i] == "$":
                prime_implicants.append(terms[i])
        if not next_round:
            return prime_implicants
        # Deduplicate before the next merging round.
        terms = list(set(next_round))
def _a(no_of_variable, minterms):
    """
    Convert each decimal minterm into a fixed-width binary string.

    Args:
        no_of_variable: number of boolean variables (binary string width).
        minterms: iterable of integer minterm values.

    Bug fix: the original repeated the parameter name (SyntaxError).

    >>> _a(3, [1, 5])
    ['001', '101']
    """
    binaries = []
    for minterm in minterms:
        bits = ""
        for _ in range(no_of_variable):
            # Build from the least significant bit, prepending each digit.
            bits = str(minterm % 2) + bits
            minterm //= 2
        binaries.append(bits)
    return binaries
def _a(implicant, term, count):
    """
    Return True when ``implicant`` covers ``term``.

    An implicant with ``count`` wildcard positions covers a minterm
    exactly when the two strings differ in exactly ``count`` positions
    (the wildcards).

    Bug fix: the original declared three parameters with the same name,
    which is a SyntaxError.
    """
    chars_a = list(implicant)
    chars_b = list(term)
    mismatches = 0
    for position in range(len(chars_a)):
        if chars_a[position] != chars_b[position]:
            mismatches += 1
    return mismatches == count
def _a(chart, prime_implicants):
    """
    Select a covering set of prime implicants from a coverage chart.

    ``chart[i][j] == 1`` means prime implicant ``i`` covers minterm ``j``.
    Essential prime implicants (sole cover of some column) are selected
    first, then remaining minterms are covered greedily by the implicant
    covering the most of them.  ``chart`` is zeroed in place.

    Bug fix: the original lost every ``select``/``chart`` element
    assignment to collapsed ``snake_case = ...`` statements.
    """
    if not chart:
        # Nothing to cover (robustness: the original indexed chart[0]).
        return []
    selected = []
    essential = [0] * len(chart)
    # Mark essential implicants: columns covered by exactly one row.
    for col in range(len(chart[0])):
        cover_count = 0
        sole_row = -1
        for row in range(len(chart)):
            if chart[row][col] == 1:
                cover_count += 1
                sole_row = row
        if cover_count == 1:
            essential[sole_row] = 1
    # Take every essential implicant and clear the columns it covers.
    for row in range(len(essential)):
        if essential[row] == 1:
            for col in range(len(chart[0])):
                if chart[row][col] == 1:
                    for other in range(len(chart)):
                        chart[other][col] = 0
            selected.append(prime_implicants[row])
    # Greedy set cover for whatever minterms remain.
    while True:
        best_count = 0
        best_row = -1
        for row in range(len(chart)):
            covered = chart[row].count(1)
            if covered > best_count:
                best_count = covered
                best_row = row
        if best_count == 0:
            return selected
        selected.append(prime_implicants[best_row])
        for col in range(len(chart[0])):
            if chart[best_row][col] == 1:
                for other in range(len(chart)):
                    chart[other][col] = 0
def _a(prime_implicants, binary):
    """
    Build the prime-implicant coverage chart.

    Returns a matrix where entry ``[i][j]`` is 1 when prime implicant
    ``i`` covers minterm ``j`` (they differ exactly at the implicant's
    ``_`` wildcard positions) and 0 otherwise.

    Bug fixes: the original repeated a parameter name (SyntaxError) and
    lost the ``chart[i][j] = 1`` assignment.  The coverage predicate is
    inlined because the module-level helper's name is mangled.
    """

    def _covers(implicant, term, wildcard_count):
        # Implicant covers the term when mismatches == number of wildcards.
        mismatches = sum(1 for a, b in zip(implicant, term) if a != b)
        return mismatches == wildcard_count

    chart = [[0] * len(binary) for _ in range(len(prime_implicants))]
    for i, implicant in enumerate(prime_implicants):
        wildcards = implicant.count("_")
        for j, term in enumerate(binary):
            if _covers(implicant, term, wildcards):
                chart[i][j] = 1
    return chart
def _a():
    """
    Interactive Quine-McCluskey driver: read the variable count and the
    minterm list from stdin, then print the prime implicants and the
    essential prime implicants.
    """
    no_of_variable = int(input("Enter the no. of variables\n"))
    # Bug fix: minterms were parsed with float(), which makes the binary
    # conversion emit "1.0"-style digits; minterms are integers.
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    # NOTE(review): these helper names match the algorithm's original
    # module, but every definition in this file is bound to ``_a`` —
    # confirm the intended function names before running.
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
    # Run the module's doctests first, then launch the interactive solver.
    import doctest
    doctest.testmod()
    # NOTE(review): ``main`` is not defined under that name in this file
    # (the driver above is bound to ``_a``) — confirm the entry point.
    main()
| 136 | 0 |
'''simple docstring'''
import os
from distutils.util import strtobool
def snake_case_(env_keys, default):
    """
    Return the first non-negative integer found among the given
    environment variables, or ``default`` when none is set.

    Args:
        env_keys: iterable of environment variable names to probe, in order.
        default: value returned when no variable holds a value >= 0.

    Bug fix: the original declared both parameters with the same name,
    which is a SyntaxError.
    """
    for env_key in env_keys:
        # Missing variables map to -1 so they are skipped below.
        value = int(os.environ.get(env_key, -1))
        if value >= 0:
            return value
    return default
def snake_case_(key, default=False):
    """
    Return the boolean value of environment variable ``key``, falling
    back to ``default`` when it is unset.

    Raises:
        ValueError: when the variable holds an unrecognized spelling.

    Bug fixes: the original declared both parameters with the same name
    (SyntaxError) and relied on ``distutils.util.strtobool``, which was
    removed in Python 3.12 — its semantics are replicated inline.
    """
    value = os.environ.get(key, str(default))
    # strtobool semantics: case-insensitive truthy/falsy spellings.
    normalized = value.lower()
    if normalized in ("y", "yes", "t", "true", "on", "1"):
        return True
    if normalized in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {value!r}")
def snake_case_(key, default="no"):
    """
    Return the raw string value of environment variable ``key``, or
    ``str(default)`` when it is unset.

    Bug fix: the original declared both parameters with the same name,
    which is a SyntaxError.
    """
    value = os.environ.get(key, str(default))
    return value
| 672 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level tokenizer resources.
# NOTE(review): every constant below is bound to the same mangled name
# ``lowercase__`` (later assignments clobber earlier ones), yet the class
# below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — verify the intended names.
lowercase__ = logging.get_logger(__name__)
# Relative file names expected inside a saved tokenizer directory.
lowercase__ = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
# Download URLs for the pretrained facebook/blenderbot-3B tokenizer files.
lowercase__ = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
# Maximum model input length (in tokens) for the pretrained checkpoint.
lowercase__ = {"facebook/blenderbot-3B": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase():
    """
    Return a mapping from every byte value (0-255) to a unicode character.

    Printable latin-1 bytes map to themselves; the remaining bytes map to
    code points starting at 256, so no byte maps to a whitespace/control
    character that would confuse byte-level BPE.

    Bug fix: the mangled source appended an undefined name to ``bs``
    instead of the byte value ``b``, and dropped the ``cs``/``n`` bindings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            # Shift non-printable bytes past the 8-bit range.
            cs.append(2**8 + n)
            n += 1
    chars = [chr(code) for code in cs]
    return dict(zip(bs, chars))
def _UpperCamelCase(word):
    """
    Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a sequence of symbols (during BPE, a tuple of
    variable-length strings).

    Bug fix: the original bound the result set to a collapsed throwaway
    name while the loop appended to the (undefined) name ``pairs``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
    """
    Byte-level BPE tokenizer (GPT-2 style) for Blenderbot.

    NOTE(review): this class is machine-mangled. Most local/attribute
    assignments were collapsed onto the throwaway name ``snake_case``
    (e.g. ``self.encoder`` is read below but never assigned), several
    methods share the single name ``lowerCAmelCase`` (later definitions
    clobber earlier ones), and ``__init__`` repeats the parameter name
    ``UpperCamelCase__`` (a SyntaxError). Comments below describe the
    evident intent; verify against the upstream BlenderbotTokenizer.
    """

    # Class-level tokenizer metadata: expected file names, pretrained file
    # maps, positional-embedding sizes, and model input names.
    lowerCamelCase = VOCAB_FILES_NAMES
    lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase = ["""input_ids""", """attention_mask"""]

    def __init__( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str="replace" , UpperCamelCase__ : int="<s>" , UpperCamelCase__ : Optional[int]="</s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : Any="<unk>" , UpperCamelCase__ : List[Any]="<pad>" , UpperCamelCase__ : Optional[int]="<mask>" , UpperCamelCase__ : Optional[Any]=False , **UpperCamelCase__ : Dict , ) -> str:
        """Build the tokenizer from a vocab file, a merges file and special tokens."""
        # Wrap plain-string special tokens as AddedToken instances.
        snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
        snake_case : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
        snake_case : List[str] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
        snake_case : Optional[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
        snake_case : int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
        snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        super().__init__(
            errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
        # NOTE(review): the vocabulary is loaded into a discarded name below
        # yet ``self.encoder`` is read on the following line — collapsed
        # ``self.encoder = json.load(...)`` assignment.
        with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
            snake_case : Union[str, Any] = json.load(UpperCamelCase__ )
        snake_case : Dict = {v: k for k, v in self.encoder.items()}
        snake_case : Optional[int] = errors # how to handle errors in decoding
        snake_case : List[Any] = bytes_to_unicode()
        snake_case : Dict = {v: k for k, v in self.byte_encoder.items()}
        # Read the merges file; first and last lines are header/blank.
        with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
            snake_case : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1]
        snake_case : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
        # Rank table: merge pair -> priority (lower rank merges first).
        snake_case : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        snake_case : int = {}
        snake_case : Any = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        snake_case : Any = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def lowerCAmelCase ( self : List[Any] ) -> Any:
        """Size of the base vocabulary (excludes added tokens)."""
        return len(self.encoder )

    def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
        """Return the full vocabulary: base encoder plus added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ) -> Dict:
        """Apply byte-pair-encoding merges to one pre-tokenized token, with caching."""
        if token in self.cache:
            return self.cache[token]
        snake_case : str = tuple(UpperCamelCase__ )
        snake_case : Union[str, Any] = get_pairs(UpperCamelCase__ )
        if not pairs:
            return token
        while True:
            # Pick the highest-priority (lowest-rank) adjacent pair.
            snake_case : Tuple = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            # NOTE(review): collapsed tuple unpack — upstream is
            # ``first, second = bigram``; both names are read below.
            snake_case ,snake_case : List[str] = bigram
            snake_case : int = []
            snake_case : int = 0
            while i < len(UpperCamelCase__ ):
                try:
                    snake_case : Optional[int] = word.index(UpperCamelCase__ , UpperCamelCase__ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    snake_case : Optional[int] = j
                # Merge the matched pair into a single symbol.
                if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            snake_case : str = tuple(UpperCamelCase__ )
            snake_case : Tuple = new_word
            if len(UpperCamelCase__ ) == 1:
                break
            else:
                snake_case : List[str] = get_pairs(UpperCamelCase__ )
        # Cache and return the space-joined merged symbols.
        snake_case : str = ''' '''.join(UpperCamelCase__ )
        snake_case : int = word
        return word

    def lowerCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] ) -> List[str]:
        """Split text with the GPT-2 regex and BPE-encode each piece."""
        snake_case : str = []
        for token in re.findall(self.pat , UpperCamelCase__ ):
            snake_case : Optional[int] = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
        return bpe_tokens

    def lowerCAmelCase ( self : int , UpperCamelCase__ : Any ) -> Any:
        """Convert a token string to its vocabulary id (unk id when missing)."""
        return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )

    def lowerCAmelCase ( self : Any , UpperCamelCase__ : List[Any] ) -> int:
        """Convert a vocabulary id back to its token string."""
        return self.decoder.get(UpperCamelCase__ )

    def lowerCAmelCase ( self : Tuple , UpperCamelCase__ : int ) -> List[str]:
        """Join tokens and decode the byte-level representation to text."""
        snake_case : Optional[int] = ''''''.join(UpperCamelCase__ )
        snake_case : int = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text

    def lowerCAmelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``; return their paths."""
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        snake_case : int = os.path.join(
            UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case : List[Any] = os.path.join(
            UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
        snake_case : str = 0
        with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            # Merges must be written in rank order; warn on gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    snake_case : List[str] = token_index
                writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
                index += 1
        return vocab_file, merge_file

    def lowerCAmelCase ( self : List[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCamelCase__ )) + [1]
        return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]

    def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
        """Return all-zero token-type ids (Blenderbot does not use segment ids)."""
        snake_case : List[Any] = [self.sep_token_id]
        snake_case : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=False , **UpperCamelCase__ : List[Any] ) -> int:
        """Optionally prepend a space so the first word is BPE-merged like mid-text words."""
        snake_case : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
            snake_case : Dict = ''' ''' + text
        return (text, kwargs)

    def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> str:
        """Append the EOS token to a single sequence (Blenderbot format)."""
        return token_ids_a + [self.eos_token_id]

    def lowerCAmelCase ( self : str , UpperCamelCase__ : "Conversation" ) -> List[int]:
        """Encode a Conversation, keeping only the trailing model_max_length tokens."""
        snake_case : Any = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(UpperCamelCase__ )
        snake_case : List[str] = ''' '''.join(UpperCamelCase__ )
        snake_case : Tuple = self.encode(UpperCamelCase__ )
        if len(UpperCamelCase__ ) > self.model_max_length:
            snake_case : List[Any] = input_ids[-self.model_max_length :]
            logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
        return input_ids
| 638 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def SCREAMING_SNAKE_CASE_(k):
    """
    Build a deferred "get item" operation: ``(getitem, key)``.

    Bug fix: the parameter had been renamed while the body still read the
    old name ``k`` (NameError at call time).
    """
    return getitem, k
def SCREAMING_SNAKE_CASE_(k, v):
    """
    Build a deferred "set item" operation: ``(setitem, key, value)``.

    Bug fix: the original declared both parameters with the same name
    (SyntaxError) while the body read ``k`` and ``v``.
    """
    return setitem, k, v
def SCREAMING_SNAKE_CASE_(k):
    """
    Build a deferred "delete item" operation: ``(delitem, key)``.

    Bug fix: the parameter had been renamed while the body still read the
    old name ``k`` (NameError at call time).
    """
    return delitem, k
def SCREAMING_SNAKE_CASE_(obj, fun, *args):
    """
    Apply ``fun(obj, *args)`` and report the outcome as a pair.

    Returns:
        ``(result, None)`` on success, ``(None, exception)`` when ``fun``
        raises — so callers can compare outcomes of two implementations.

    Bug fix: the original declared all parameters with the same name,
    which is a SyntaxError.
    """
    try:
        return fun(obj, *args), None
    except Exception as e:
        # Broad on purpose: the caller compares raised exceptions, too.
        return None, e
# Deferred operation scripts exercised by the parametrized test below.
# NOTE(review): the scripts call ``_set``/``_get``/``_del``, but every
# helper definition above is bound to ``SCREAMING_SNAKE_CASE_`` (mangled),
# and every list below clobbers the same name ``UpperCAmelCase_`` — verify
# the intended identifiers.
UpperCAmelCase_ : Any = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)
# Overwrite the same key twice.
UpperCAmelCase_ : Dict = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]
# Interleaved inserts and deletes of the same keys.
UpperCAmelCase_ : Optional[Any] = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]
# Accesses and deletes of keys that are (sometimes) absent.
UpperCAmelCase_ : Union[str, Any] = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]
# Enough inserts to force the map's backing storage to grow.
UpperCAmelCase_ : Union[str, Any] = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
# Grow, shrink back down, then insert again.
UpperCAmelCase_ : str = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
    'operations' , (
        pytest.param(_add_items , id='add items' ),
        pytest.param(_overwrite_items , id='overwrite items' ),
        pytest.param(_delete_items , id='delete items' ),
        pytest.param(_access_absent_items , id='access absent items' ),
        pytest.param(_add_with_resize_up , id='add with resize up' ),
        pytest.param(_add_with_resize_down , id='add with resize down' ),
    ) , )
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] ) -> Optional[int]:
    """
    Differential test: replay the same operation script against HashMap
    and a builtin dict, asserting identical results and identical state.

    NOTE(review): machine-mangled — results/locals are collapsed onto
    ``a_`` while the asserts read ``my_res``/``py_res``/``my``/``py``, and
    the ``_run_operation`` calls pass the placeholder ``__A`` instead of
    ``(my, fun, *args)`` / ``(py, fun, *args)``; the parametrize ids also
    reference list names that are mangled above.  Verify against upstream.
    """
    # Small initial block size so resize paths are exercised quickly.
    a_ : str = HashMap(initial_block_size=4 )
    a_ : Tuple = {}
    for _, (fun, *args) in enumerate(__A ):
        a_ , a_ : Any = _run_operation(__A , __A , *__A )
        a_ , a_ : Union[str, Any] = _run_operation(__A , __A , *__A )
        # Same return/exception and same observable container state.
        assert my_res == py_res
        assert str(__A ) == str(__A )
        assert set(__A ) == set(__A )
        assert len(__A ) == len(__A )
        assert set(my.items() ) == set(py.items() )
def SCREAMING_SNAKE_CASE_() -> None:
    """
    Ensure HashMap's public API is a strict subset of dict's public API
    (no extra public names were added).

    Bug fixes: the inner predicate ignored its argument and read a stale
    outer name, and both comprehensions passed an undefined variable to it.
    """

    def is_public(name: str) -> bool:
        # Public API == anything not underscore-prefixed.
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    # Strict superset: dict exposes everything HashMap exposes, and more.
    assert dict_public_names > hash_public_names
| 443 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map from pretrained model ids to config URLs.
# NOTE(review): both constants are bound to the same mangled name
# ``UpperCAmelCase_`` (the second clobbers the first) — verify the
# intended names (logger / AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP).
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
    'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
    """
    Configuration for the Autoformer time-series transformer.

    NOTE(review): machine-mangled — every ``self.<attr> = <value>``
    assignment in ``__init__`` was collapsed onto the throwaway name
    ``a_`` (the right-hand sides still reveal the intended attribute
    names), and the ``_number_of_features`` property was renamed to
    ``SCREAMING_SNAKE_CASE`` although ``__init__`` reads
    ``self._number_of_features``.  Verify against the upstream
    AutoformerConfig before relying on behavior.
    """

    # Model type tag and the mapping from generic config attribute names
    # to Autoformer-specific ones.
    snake_case__ : Union[str, Any] = '''autoformer'''
    snake_case__ : Any = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : int = 1_0 , SCREAMING_SNAKE_CASE__ : int = 2_5 , SCREAMING_SNAKE_CASE__ : int = 3 , **SCREAMING_SNAKE_CASE__ : int , ) -> Union[str, Any]:
        """Store time-series, transformer and Autoformer-specific settings."""
        # time series specific configuration
        a_ : Optional[int] = prediction_length
        # Context window defaults to the prediction length when unset.
        a_ : Tuple = context_length if context_length is not None else prediction_length
        a_ : Tuple = distribution_output
        a_ : int = loss
        a_ : Tuple = input_size
        a_ : int = num_time_features
        a_ : Dict = lags_sequence
        a_ : List[str] = scaling
        a_ : Tuple = num_dynamic_real_features
        a_ : Dict = num_static_real_features
        a_ : Dict = num_static_categorical_features
        # Cardinality must describe every static categorical feature.
        if cardinality is not None and num_static_categorical_features > 0:
            if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            a_ : Any = cardinality
        else:
            a_ : Tuple = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            a_ : Tuple = embedding_dimension
        else:
            # Heuristic embedding size per categorical feature, capped at 50.
            a_ : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
        a_ : Optional[Any] = num_parallel_samples
        # Transformer architecture configuration
        # d_model input: lagged values of each input dim plus extra features.
        a_ : Dict = input_size * len(self.lags_sequence ) + self._number_of_features
        a_ : Tuple = d_model
        a_ : int = encoder_attention_heads
        a_ : Optional[int] = decoder_attention_heads
        a_ : str = encoder_ffn_dim
        a_ : List[str] = decoder_ffn_dim
        a_ : Any = encoder_layers
        a_ : Any = decoder_layers
        a_ : List[Any] = dropout
        a_ : Tuple = attention_dropout
        a_ : Union[str, Any] = activation_dropout
        a_ : List[Any] = encoder_layerdrop
        a_ : List[str] = decoder_layerdrop
        a_ : Optional[Any] = activation_function
        a_ : Union[str, Any] = init_std
        a_ : List[str] = use_cache
        # Autoformer
        a_ : Tuple = label_length
        a_ : List[Any] = moving_average
        a_ : Optional[Any] = autocorrelation_factor
        super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )

    @property
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        """Number of additional per-timestep features concatenated to the lags."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
| 443 | 1 |
import datasets
from .evaluate import evaluate
# Metric metadata strings consumed by the `datasets` docstring decorator.
# NOTE(review): all three are bound to the same mangled name ``A`` while
# the decorator below reads ``_DESCRIPTION``/``_KWARGS_DESCRIPTION`` (and
# ``_CITATION`` is read inside the class) — verify the intended names.
A : Union[str, Any] = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
A : int = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
A : Any = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    '''SQuAD v1 metric: exact match (EM) and token-level F1 via the official script.

    NOTE(review): machine-mangled — the two methods share the name
    ``lowerCamelCase__`` (the second clobbers the first), locals in
    ``_compute`` are collapsed onto ``lowercase__`` while ``score`` is
    returned unassigned, and the decorator reads constants whose names are
    mangled at module level.  Verify against the upstream squad metric.
    '''

    def lowerCamelCase__ (self : Any ) -> int:
        """Declare the metric's schema: prediction texts and SQuAD-format answers."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
                    """references""": {
                        """id""": datasets.Value("""string""" ),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string""" ),
                                """answer_start""": datasets.Value("""int32""" ),
                            } ),
                    },
                } ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )

    def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Tuple:
        """Reshape predictions/references into the official SQuAD layout and score them."""
        # Map each question id to its predicted answer text.
        lowercase__ = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        # Wrap references in the article/paragraph/qas nesting the scorer expects.
        lowercase__ = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        lowercase__ = evaluate(dataset=_UpperCAmelCase , predictions=_UpperCAmelCase )
        return score
| 15 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class _lowerCAmelCase :
    """
    Helper that builds TFMBart configs and dummy inputs for the tests below.

    NOTE(review): machine-mangled — ``__init__`` collapses every
    ``self.<attr> = <attr>`` assignment onto ``_SCREAMING_SNAKE_CASE``
    (so the attributes read elsewhere are never set), and the decorated
    methods all share the name ``A``.  Verify against the upstream
    TFMBartModelTester.
    """

    # Config class under test plus per-test config overrides.
    SCREAMING_SNAKE_CASE_: Union[str, Any] = MBartConfig
    SCREAMING_SNAKE_CASE_: Optional[Any] = {}
    SCREAMING_SNAKE_CASE_: Tuple = 'gelu'

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1_3 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=9_9 , lowerCAmelCase_=3_2 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=3_7 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=2_0 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , ) -> int:
        """Record the small model/test hyper-parameters."""
        _SCREAMING_SNAKE_CASE : Optional[Any] = parent
        _SCREAMING_SNAKE_CASE : str = batch_size
        _SCREAMING_SNAKE_CASE : str = seq_length
        _SCREAMING_SNAKE_CASE : Dict = is_training
        _SCREAMING_SNAKE_CASE : int = use_labels
        _SCREAMING_SNAKE_CASE : str = vocab_size
        _SCREAMING_SNAKE_CASE : List[Any] = hidden_size
        _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
        _SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
        _SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
        _SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
        _SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
        _SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
        _SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
        _SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
        _SCREAMING_SNAKE_CASE : Tuple = bos_token_id

    def A ( self ) -> Tuple:
        """Build a random batch of input ids (EOS-terminated) plus a matching config."""
        _SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        _SCREAMING_SNAKE_CASE : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        _SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        _SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _SCREAMING_SNAKE_CASE : Tuple = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        _SCREAMING_SNAKE_CASE : Any = prepare_mbart_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        return config, inputs_dict

    def A ( self ) -> int:
        """Exercise the decoder with use_cache=True on a single-row slice of the batch."""
        _SCREAMING_SNAKE_CASE : int = TFMBartModel(config=lowerCAmelCase_ ).get_decoder()
        _SCREAMING_SNAKE_CASE : int = inputs_dict['input_ids']
        # Keep only the first example to make the cache comparison cheap.
        _SCREAMING_SNAKE_CASE : Tuple = input_ids[:1, :]
        _SCREAMING_SNAKE_CASE : Any = inputs_dict['attention_mask'][:1, :]
        _SCREAMING_SNAKE_CASE : List[str] = inputs_dict['head_mask']
        _SCREAMING_SNAKE_CASE : List[Any] = 1
        # first forward pass
        _SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple()
        _SCREAMING_SNAKE_CASE : str = past_key_values[1]
def lowercase__(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """
    Fill in default attention/head masks for a TFMBart forward pass and
    return the complete keyword-argument dict.

    Bug fixes: the original repeated one parameter name for every slot
    (a SyntaxError) and collapsed each mask assignment onto a throwaway
    name; ``tf.inta`` is the mangled spelling of ``tf.int8``.
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder position (decoder_start_token) is always attended.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """
    Common + pipeline test suite for TFMBart models.

    NOTE(review): machine-mangled — all class attributes share the name
    ``SCREAMING_SNAKE_CASE_`` and all methods share the name ``A`` (later
    definitions clobber earlier ones); ``setUp``-style locals are collapsed
    so ``self.model_tester``/``self.config_tester`` are never assigned.
    """

    # Model classes and pipeline mapping exercised by the mixins.
    SCREAMING_SNAKE_CASE_: List[str] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE_: Dict = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE_: Tuple = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_: Any = True
    SCREAMING_SNAKE_CASE_: int = False
    SCREAMING_SNAKE_CASE_: Optional[int] = False

    def A ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
        """Skip every pipeline test except feature extraction (known failures)."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def A ( self ) -> Optional[int]:
        """Create the model tester and config tester used by the mixin tests."""
        _SCREAMING_SNAKE_CASE : List[Any] = TFMBartModelTester(self )
        _SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ )

    def A ( self ) -> List[Any]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def A ( self ) -> Dict:
        """Check decoder caching against large inputs."""
        _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    """
    Slow integration test: translate English -> Romanian with the
    pretrained facebook/mbart-large-en-ro checkpoint.

    NOTE(review): machine-mangled — all methods share the name ``A`` and
    the generated-text locals are collapsed, so ``self.tokenizer``/
    ``self.model``/``generated_words`` are read but never bound under
    those names.  Verify against the upstream TFMBartModelIntegrationTest.
    """

    # Source sentence, expected translation, and the checkpoint under test.
    SCREAMING_SNAKE_CASE_: str = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    SCREAMING_SNAKE_CASE_: Union[str, Any] = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    SCREAMING_SNAKE_CASE_: Dict = 'facebook/mbart-large-en-ro'

    @cached_property
    def A ( self ) -> Union[str, Any]:
        """Lazily load the pretrained tokenizer."""
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def A ( self ) -> Optional[int]:
        """Lazily load the pretrained seq2seq model."""
        _SCREAMING_SNAKE_CASE : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def A ( self , **lowerCAmelCase_ ) -> Optional[int]:
        """Translate the fixture sentence and compare with the expected text."""
        _SCREAMING_SNAKE_CASE : Tuple = self.translate_src_text(**lowerCAmelCase_ )
        self.assertListEqual(self.expected_text , lowerCAmelCase_ )

    def A ( self , **lowerCAmelCase_ ) -> Tuple:
        """Tokenize the source text, generate with beam search, and decode."""
        _SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(self.src_text , **lowerCAmelCase_ , return_tensors='tf' )
        _SCREAMING_SNAKE_CASE : int = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        _SCREAMING_SNAKE_CASE : int = self.tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
        return generated_words

    @slow
    def A ( self ) -> Any:
        """End-to-end translation check (slow)."""
        self._assert_generated_batch_equal_expected()
| 621 | 0 |
"""simple docstring"""
import math
from collections.abc import Callable
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
_UpperCAmelCase = xa
_UpperCAmelCase = xa
while True:
if x_n == x_na or function(SCREAMING_SNAKE_CASE ) == function(SCREAMING_SNAKE_CASE ):
raise ZeroDivisionError('float division by zero, could not find root' )
_UpperCAmelCase = x_na - (
function(SCREAMING_SNAKE_CASE ) / ((function(SCREAMING_SNAKE_CASE ) - function(SCREAMING_SNAKE_CASE )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
_UpperCAmelCase = x_na
_UpperCAmelCase = x_na
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
return math.pow(SCREAMING_SNAKE_CASE,3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 710 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCAmelCase ( snake_case ):
    # Output dataclass for the DeepFloyd-IF pipelines.
    # NOTE(review): mechanical renaming collapsed what were three distinct
    # annotated fields into one repeatedly-assigned class attribute, and the
    # field annotations were replaced by the literal 42 — only the last
    # assignment survives at runtime. Recover the original field names/types
    # from upstream before relying on this class. TODO confirm.
    lowerCAmelCase__ = 42
    lowerCAmelCase__ = 42
    lowerCAmelCase__ = 42
# Import the concrete IF pipelines only when both transformers and torch are
# installed; otherwise fall back to auto-generated dummy objects so that
# `from ... import IFPipeline` still resolves (and raises a helpful error
# at use time instead of at import time).
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 494 | 0 |
import os
import time
import numpy as np
import onnxruntime as ort
# ONNX Runtime latency benchmark for a BERT-style model exported to model.onnx.
# NOTE(review): every variable in the original was assigned to one reused
# obfuscated name, leaving `sess_opt`, `run_opt`, `batch`, etc. undefined;
# names below are restored from their later uses. The first three strings look
# like the usual TensorRT environment flags exported via os.environ
# (INT8 enable = "1", native calibration table = "0", engine cache = "1")
# — TODO confirm against the upstream script.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

# Disable graph optimizations so ONNX Runtime executes the model as exported.
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# BERT-style dummy inputs: batch of 1, sequence length 128, all ones.
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

# One warm-up run so engine build / lazy initialization is excluded from timing.
print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

# Timed loop: report mean latency in milliseconds over max_iters runs.
print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 80 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Module-level constants.
# NOTE(review): three distinct names were collapsed into one by mechanical
# renaming; restored to the names referenced later in the file.
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune, or train from scratch.

    NOTE(review): every field in the original shared one mangled name and used
    an undefined default; field names/defaults restored from the help strings
    and from the references in ``__post_init__``/``main``.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense when no pre-existing config is loaded.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and evaluation (whole-word-masking MLM).

    NOTE(review): field names restored — the original collapsed every field
    into one mangled attribute while ``__post_init__`` and ``main`` read the
    conventional names.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Validate file extensions early so a typo fails fast, before any download.
        if self.train_file is not None:
            extension = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference data to ``dataset``.

    Reads one JSON list per non-empty line of ``ref_file`` and stores it in a
    new ``chinese_ref`` column (the original dropped this key assignment).
    The ref file must have exactly one line per dataset example.
    """
    with open(ref_file, 'r', encoding='utf-8') as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    """Train/evaluate a masked language model with whole-word masking.

    Flow: parse args -> detect last checkpoint -> set up logging -> load data
    -> load config/tokenizer/model -> tokenize -> attach Chinese refs ->
    train -> evaluate. NOTE(review): the original collapsed every local into
    one mangled name, leaving dozens of references (``datasets``, ``config``,
    ``tokenizer``, ``model``, ``has_ref``…) unresolved; names restored from
    their uses. The empty-line filter in ``tokenize_function`` also discarded
    its result — it now writes back into ``examples["text"]``.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either a hub dataset or local CSV/JSON/TXT files.
    # In distributed training, load_dataset guarantees only one local process downloads.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # Carve a validation split out of the train split.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'''train[:{data_args.validation_split_percentage}%]''',
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'''train[{data_args.validation_split_percentage}%:]''',
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension, data_files=data_files)

    # Load pretrained config/tokenizer/model. The .from_pretrained methods
    # guarantee only one local process downloads concurrently.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'''Overriding config: {model_args.config_overrides}''')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'''New config: {config}''')

    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets. First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]

    padding = 'max_length' if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines (write back — the original dropped this filter's result).
        examples['text'] = [line for line in examples['text'] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'], data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator: takes care of randomly masking whole words.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets['train'] if training_args.do_train else None,
        eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, 'train_results.txt')
        if trainer.is_world_process_zero():
            with open(output_train_file, 'w') as writer:
                logger.info('***** Train results *****')
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f''' {key} = {value}''')
                    writer.write(f'''{key} = {value}\n''')

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json'))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in sorted(results.items()):
                    logger.info(f''' {key} = {value}''')
                    writer.write(f'''{key} = {value}\n''')

    return results
def _mp_fn(index):
    """Entry point for TPU multiprocessing via xla_spawn (index = process index).

    NOTE(review): the original reused the same mangled name as ``main`` and
    ``add_chinese_references``, so the later definition shadowed ``main``.
    """
    main()


if __name__ == "__main__":
    main()
| 120 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER', 'False')) is not True,
    reason='Skipping test because should only be run when releasing minor transformers version',
)
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
        },
    ] )
class SingleNodeTest(unittest.TestCase):
    """Launch a single-node SageMaker training job per framework and check KPIs.

    NOTE(review): every method in the original shared one mangled name (so
    ``self.create_estimator`` was unresolvable) and two keyword arguments were
    undefined names; method names and the ``check=True`` /
    ``debugger_hook_config=False`` values are restored from upstream usage.
    """

    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count=1):
        # Creates a HuggingFace estimator bound to the parameterized framework/script.
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=F'''{self.env.base_job_name}-single''',
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version='py36',
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''')

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 99_9999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''', 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 126 |
"""Modified Euler's (Heun's) method for first-order ODEs."""
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Integrate y' = ode_func(x, y) from x0 to x_end with the modified Euler
    (predictor-corrector / Heun) method.

    Fixes vs. the original: five parameters all shared one name (a SyntaxError)
    and the predictor/corrector locals were collapsed into one.

    :param ode_func:  right-hand side f(x, y) of the ODE
    :param y0:        initial value y(x0)
    :param x0:        initial x
    :param step_size: fixed step size h
    :param x_end:     final x
    :return: array of length n+1 with the approximated y values
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: plain Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both ends of the step.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 126 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# Resource maps for the fast LXMERT tokenizer (BERT-style WordPiece vocab).
# NOTE(review): the original assigned all four maps to one reused name while
# the tokenizer class below reads four distinct names; restored here.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
    },
    'tokenizer_file': {
        'unc-nlp/lxmert-base-uncased': (
            'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum input length (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'unc-nlp/lxmert-base-uncased': 5_1_2,
}

# Per-checkpoint defaults applied at tokenizer construction time.
PRETRAINED_INIT_CONFIGURATION = {
    'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) LXMERT tokenizer, a BERT-style WordPiece tokenizer.

    NOTE(review): the original had an undefined base class, colliding class
    attribute/method names, and undefined parameter references inside every
    body; names restored to the conventional fast-tokenizer layout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the serialized state disagrees with
        # the requested options (lowercasing / accent stripping / CJK handling).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] for sequence pairs)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 661 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | 189 | 0 |
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared assertions: 4x3 Dataset with col_1..col_3 and the expected dtypes.

    NOTE(review): the original duplicated both parameter names (a SyntaxError);
    restored to (dataset, expected_features) from the body's usage.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """keep_in_memory=True must allocate Arrow memory; False must not.

    NOTE(review): parameter names restored (the original duplicated one name
    four times); fixture names follow the upstream test module — TODO confirm.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Reading honours an explicit `features` schema, defaulting when None."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table in the SQLite db at `sqlite_path`."""
    # Local import: the module-level import line was mangled ("sqlitea").
    import sqlite3

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip: read from SQLite, write back (num_proc=1), rows must match."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for rowa, rowa_written in zip(original_sql, expected_sql):
        assert rowa == rowa_written
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip with num_proc=2: multiprocessing must not change the rows."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for rowa, rowa_written in zip(original_sql, expected_sql):
        assert rowa == rowa_written
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 is invalid and must raise.

    NOTE(review): the expected exception was an undefined obfuscated name;
    ValueError matches the writer's validation upstream — TODO confirm.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 708 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __a ( unittest.TestCase ):
    """Unit tests for the backbone helpers in ``transformers.utils.backbone_utils``.

    The obfuscated original gave all three tests the same method name (so two
    were shadowed and never ran) and passed an undefined name where ``None``,
    ``stage_names`` or ``ValueError`` belonged.
    """

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
# (removed dataset-concatenation artifact)
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
# The three module docstring constants referenced by the metric class below.
# The obfuscated original assigned all three to the same name, leaving
# `_CITATION` / `_DESCRIPTION` / `_KWARGS_DESCRIPTION` undefined.
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
    author = \"Lin, Chin-Yew and
      Och, Franz Josef\",
    booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
    month = \"aug 23{--}aug 27\",
    year = \"2004\",
    address = \"Geneva, Switzerland\",
    publisher = \"COLING\",
    url = \"https://www.aclweb.org/anthology/C04-1072\",
    pages = \"501--507\",
}
"""

_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""

_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
    ...     [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
    ...     [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric(\"bleu\")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results[\"bleu\"])
    1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a ( datasets.Metric ):
    """BLEU metric backed by the reference TensorFlow NMT implementation.

    The ``datasets.Metric`` framework calls ``_info`` and ``_compute`` by
    name; the obfuscated original renamed both (and destroyed the 6-tuple
    unpacking of ``compute_bleu``'s result), so the metric could not run.
    """

    def _info(self):
        # Declare the expected input columns: tokenized predictions and
        # (possibly multiple) tokenized references per sample.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Compute corpus-level BLEU for `predictions` against `references`."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        # compute_bleu returns a 6-tuple; unpack it into named fields.
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }


# Start of the next concatenated module (Levit model tests).
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _lowercase ( UpperCAmelCase__ ):
    """Config tester that additionally checks Levit-specific config attributes."""

    def create_and_test_config_common_properties(self):
        # The original body assigned the config to one name and then read an
        # undefined one; build the config and check it directly.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class _lowercase :
    """Builds tiny Levit configs/inputs and runs shape checks for the tests below.

    The obfuscated original declared ``__init__`` with many identically named
    parameters (a SyntaxError), dropped every ``self.`` assignment target, and
    gave all methods the same name — restored here to the names the test class
    actually calls (``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Downsampling ("Subsample") operations inserted between the stages.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # The patch-embedding stem applies 4 stride-2 convolutions.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Common model/pipeline tests for the Levit family.

    In the obfuscated original every method was named ``_a`` (so unittest
    discovered no ``test_*`` methods at all), the nested helper declared
    duplicate parameter names (a SyntaxError), and several assignment
    targets and mapping arguments were lost — all restored here.
    """

    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            # Mirror the 4 stride-2 convolutions of the patch-embedding stem.
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_feed_forward_chunking(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # The teacher variant is inference-only and takes no labels.
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    Renamed from the obfuscated ``snake_case__``: the integration test calls
    ``prepare_img()``, which was otherwise undefined.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test running a real pretrained Levit checkpoint.

    The original ended with inline concatenation junk (a syntax error) and
    referenced ``self.default_image_processor`` although the cached property
    had been renamed away.
    """

    @cached_property
    def default_image_processor(self):
        # Image processor matching the first checkpoint in the archive list.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # sentencepiece is required for the slow tokenizer; without it there is
    # no slow class to fall back to.
    TaTokenizer = None


logger = logging.get_logger(__name__)

# The obfuscated original assigned every constant below to one shared name,
# leaving the names the tokenizer class references undefined.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Fast (Rust-backed) T5 tokenizer.

    Mirrors the slow ``T5Tokenizer`` API and manages the ``extra_ids``
    sentinel tokens (``<extra_id_0>`` ... ``<extra_id_{extra_ids-1}>``).

    The obfuscated original declared duplicate parameter names (SyntaxErrors),
    collapsed all class attributes into one name, dropped ``self.`` targets,
    and renamed the framework-required methods — restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    # Token ids prepended to every encoded sequence (none for T5).
    prefix_tokens = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add the sentinel tokens unless the caller supplied their own list.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda token: bool("extra_id_" in str(token)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving the slow vocabulary requires the sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        # Backwards-compatibility shim: warn when relying on the deprecated
        # hard-coded max lengths for the canonical T5 checkpoints.
        if pretrained_model_name_or_path in _UpperCAmelCase.max_model_input_sizes:
            deprecated_max_model_length = _UpperCAmelCase.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
                return deprecated_max_model_length

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None) -> List[int]:
        """Append EOS to each sequence; a pair sequence is concatenated after it."""
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_a_pair = token_ids_a_pair + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a_pair

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None) -> List[int]:
        """T5 does not use token types: return a zero mask covering both sequences."""
        eos = [self.eos_token_id]

        if token_ids_a_pair is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a_pair + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    """Fast (tiny-model) tests for the Kandinsky 2.2 controlnet img2img pipeline.

    The obfuscated original collapsed all class attributes into one name and
    gave every property/method the same name, although the bodies reference
    ``self.time_input_dim``, ``self.dummy_unet``, ``self.get_dummy_inputs``
    etc. — restored here; ``np.uinta`` is also fixed to ``np.uint8``.
    """

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 controlnet img2img pipeline."""

    def tearDown(self):
        """Free GPU memory between tests.

        Bug fix: this method must be named ``tearDown`` (it calls
        ``super().tearDown()``) for unittest to invoke it.
        """
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        """Run the prior + controlnet img2img pipelines end-to-end on GPU and
        compare the output against a stored reference image.

        Bug fixes: locals were all bound to one obfuscated name (NameError at
        every use); ``torch.floataa`` corrected to ``torch.float16``; the
        pipelines are moved to "cuda" explicitly (the test is gated by
        @require_torch_gpu).
        """
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to("cuda")
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to("cuda")
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="", ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 82 | 1 |
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__A = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowerCAmelCase( Trainer ):
    """A ``Trainer`` subclass for question answering with quantization-aware training.

    Adds: post-processing of raw start/end logits before metric computation
    (``post_process_function``), calibration of fake-quantization ranges via
    ``quant_trainer``, and ONNX export of the calibrated model.

    Bug fixes vs. the previous revision: the base class was the undefined name
    ``a`` (``Trainer`` is imported above); ``__init__`` declared the same
    parameter name repeatedly (a SyntaxError) and stored nothing on ``self``;
    all five methods shared one name, so only the last survived on the class
    while call sites reference ``get_calib_dataloader`` etc.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        """Store QA/quantization extras, forwarding everything else to ``Trainer``."""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a shuffled DataLoader over the calibration dataset.

        NOTE(review): ``self.calib_dataset`` is read here but never assigned in
        ``__init__`` — presumably callers always pass ``calib_dataset``; confirm.
        """
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires an calib_dataset.')
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset, description='Calibration')
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        """Run forward passes over calibration data to collect quantizer ranges."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)
        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)
        logger.info('***** Running calibration *****')
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step (outputs are discarded; only the quantizer
            # histograms collected inside the model matter here)
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate, post-process logits into answers, compute and log metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description='Evaluation',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            # Always restore the user's compute_metrics, even if the loop raised.
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            self.log(metrics)
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict on a test set and return a ``PredictionOutput`` with metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description='Prediction',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (fake-)quantized model to ``<output_dir>/model.onnx``.

        Uses one evaluation batch as the tracing input; switches the
        TensorRT-style quantizers to ONNX-compatible fake-quant ops first.
        """
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))
        # saving device - to make it consistent
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # convert to tuple
        dummy_input = tuple(v.to(device) for k, v in batch.items())
        logger.info('Converting model to be onnx compatible')
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True
        model = self.model.to(device)
        model.eval()
        model.float()
        model_to_save = model.module if hasattr(model, 'module') else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)
        output_model_file = os.path.join(output_dir, 'model.onnx')
        logger.info(f"exporting model to {output_model_file}")
        axes = {0: 'batch_size', 1: 'seq_len'}
        torch.onnx.export(
            model_to_save,
            dummy_input,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=['input_ids', 'attention_mask', 'token_type_ids'],
            output_names=['output_start_logits', 'output_end_logits'],
            dynamic_axes={
                'input_ids': axes,
                'attention_mask': axes,
                'token_type_ids': axes,
                'output_start_logits': axes,
                'output_end_logits': axes,
            },
            verbose=True,
        )
        logger.info('onnx export finished')
| 93 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
# NOTE(review): every module-level constant below is bound to the same name
# `_lowerCAmelCase`, so each assignment clobbers the previous one, while the
# class below reads these values under different names (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, ...).  This looks like a mechanical rename
# artifact — confirm before relying on this module at runtime.

# Module logger.
_lowerCAmelCase = logging.get_logger(__name__)

# File names under which the slow (vocab) and fast (tokenizer.json) files are saved.
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

# Download URLs of the vocab / tokenizer files for each published BERT checkpoint.
_lowerCAmelCase = {
    """vocab_file""": {
        """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
        """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
        """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
        """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
        """bert-base-multilingual-uncased""": (
            """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
        ),
        """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
        """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
        """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
        """bert-large-uncased-whole-word-masking""": (
            """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
        ),
        """bert-large-cased-whole-word-masking""": (
            """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
        ),
        """bert-large-uncased-whole-word-masking-finetuned-squad""": (
            """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
        ),
        """bert-large-cased-whole-word-masking-finetuned-squad""": (
            """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
        ),
        """bert-base-cased-finetuned-mrpc""": (
            """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
        ),
        """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
        """bert-base-german-dbmdz-uncased""": (
            """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
        ),
        """TurkuNLP/bert-base-finnish-cased-v1""": (
            """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
        ),
        """TurkuNLP/bert-base-finnish-uncased-v1""": (
            """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
        ),
        """wietsedv/bert-base-dutch-cased""": (
            """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
        """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
        """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
        """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
        """bert-base-multilingual-uncased""": (
            """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
        ),
        """bert-base-multilingual-cased""": (
            """https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
        ),
        """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
        """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
        """bert-large-uncased-whole-word-masking""": (
            """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
        ),
        """bert-large-cased-whole-word-masking""": (
            """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
        ),
        """bert-large-uncased-whole-word-masking-finetuned-squad""": (
            """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
        ),
        """bert-large-cased-whole-word-masking-finetuned-squad""": (
            """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
        ),
        """bert-base-cased-finetuned-mrpc""": (
            """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
        ),
        """bert-base-german-dbmdz-cased""": (
            """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
        ),
        """bert-base-german-dbmdz-uncased""": (
            """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
        ),
        """TurkuNLP/bert-base-finnish-cased-v1""": (
            """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
        ),
        """TurkuNLP/bert-base-finnish-uncased-v1""": (
            """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
        ),
        """wietsedv/bert-base-dutch-cased""": (
            """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
        ),
    },
}

# Maximum sequence length each checkpoint was trained with.
_lowerCAmelCase = {
    """bert-base-uncased""": 512,
    """bert-large-uncased""": 512,
    """bert-base-cased""": 512,
    """bert-large-cased""": 512,
    """bert-base-multilingual-uncased""": 512,
    """bert-base-multilingual-cased""": 512,
    """bert-base-chinese""": 512,
    """bert-base-german-cased""": 512,
    """bert-large-uncased-whole-word-masking""": 512,
    """bert-large-cased-whole-word-masking""": 512,
    """bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
    """bert-large-cased-whole-word-masking-finetuned-squad""": 512,
    """bert-base-cased-finetuned-mrpc""": 512,
    """bert-base-german-dbmdz-cased""": 512,
    """bert-base-german-dbmdz-uncased""": 512,
    """TurkuNLP/bert-base-finnish-cased-v1""": 512,
    """TurkuNLP/bert-base-finnish-uncased-v1""": 512,
    """wietsedv/bert-base-dutch-cased""": 512,
}

# Per-checkpoint default init kwargs (lower-casing behaviour).
_lowerCAmelCase = {
    """bert-base-uncased""": {"""do_lower_case""": True},
    """bert-large-uncased""": {"""do_lower_case""": True},
    """bert-base-cased""": {"""do_lower_case""": False},
    """bert-large-cased""": {"""do_lower_case""": False},
    """bert-base-multilingual-uncased""": {"""do_lower_case""": True},
    """bert-base-multilingual-cased""": {"""do_lower_case""": False},
    """bert-base-chinese""": {"""do_lower_case""": False},
    """bert-base-german-cased""": {"""do_lower_case""": False},
    """bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
    """bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
    """bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
    """bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
    """bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
    """bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
    """bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
    """TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
    """TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
    """wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase__ ( PreTrainedTokenizerFast ):
    """A "fast" BERT tokenizer backed by HuggingFace *tokenizers*.

    Bug fixes vs. the previous revision: the base class was the undefined name
    ``snake_case__`` (``PreTrainedTokenizerFast`` is imported above); all class
    attributes shared the name ``snake_case_`` so only the last survived;
    ``__init__`` and the helper methods declared duplicate parameter names
    (a SyntaxError); method names are restored so the base-class machinery
    can find them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        """Initialise the fast tokenizer and keep the backend normalizer in
        sync with the requested casing / accent / CJK options."""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options passed to this constructor.
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``.

        Bug fix: both parameters were previously named identically, so the
        second sequence could never be appended.
        """
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: 0s over ``[CLS] A [SEP]``, 1s over ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend vocabulary files and return their paths as a tuple."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
# (parlai_name, hf_name) substring pairs used to translate ParlAI state-dict
# keys into the HF Blenderbot naming scheme.
# NOTE(review): bound to `UpperCamelCase_` here but referenced below as
# `PATTERNS` — presumably a mechanical rename artifact; confirm.
UpperCamelCase_ = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Dict:
    """Translate one ParlAI state-dict key into its HF Blenderbot equivalent.

    Applies the substring substitutions from ``PATTERNS`` and then fixes up
    the encoder/decoder layer-norm and attention names.

    Bug fixes: the body referenced the undefined names ``k`` and
    ``__snake_case`` instead of the parameter, and every ``str.replace``
    result was discarded (strings are immutable), so no renaming happened.
    """
    k = snake_case__
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> List[Any]:
__UpperCAmelCase =[
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
__UpperCAmelCase =sd.pop(__snake_case )
__UpperCAmelCase =k.replace('''layernorm_embedding''' , '''layer_norm''' )
assert new_k not in sd
__UpperCAmelCase =v
UpperCamelCase_ = ["START"]
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( checkpoint_path , pytorch_dump_folder_path , hf_config_json ) -> Union[str, Any]:
    """Convert a ParlAI Blenderbot checkpoint into an HF Blenderbot model and save it.

    Parameters: path to the ParlAI checkpoint, output folder, and the HF
    config JSON. Bug fixes: the previous revision declared the same parameter
    name three times (a SyntaxError) and bound every local to one obfuscated
    name, leaving ``model``, ``sd``, ``cfg``, ``m``, ``failures`` and the
    remapped state dict undefined at their use sites.
    """
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(hf_config_json)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI: convert a ParlAI Blenderbot checkpoint to the HF format.
    # NOTE(review): the parser is bound to `UpperCamelCase_` but the
    # add_argument calls reference `parser`, and the final call targets
    # `convert_parlai_checkpoint` while the function above is named
    # `SCREAMING_SNAKE_CASE` — presumably rename artifacts; confirm.
    UpperCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
    parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
    parser.add_argument(
        '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
    )
    UpperCamelCase_ = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
    """Config/inputs factory used by the TFRegNet model tests.

    NOTE(review): this class looks mechanically renamed — every parameter is
    named ``UpperCAmelCase`` (duplicate parameter names are a SyntaxError),
    every method is named ``A__`` (so only the last definition survives on
    the class), and ``__init__`` assigns to the local ``__UpperCAmelCase``
    instead of the ``self.*`` attributes the other methods read.  Confirm
    against the upstream TFRegNetModelTester before running.
    """

    def __init__(self , UpperCAmelCase , UpperCAmelCase=3 , UpperCAmelCase=3_2 , UpperCAmelCase=3 , UpperCAmelCase=1_0 , UpperCAmelCase=[1_0, 2_0, 3_0, 4_0] , UpperCAmelCase=[1, 1, 2, 1] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=3 , UpperCAmelCase=None , ):
        """Store test hyper-parameters (batch size, image size, stage widths/depths, ...)."""
        __UpperCAmelCase =parent
        __UpperCAmelCase =batch_size
        __UpperCAmelCase =image_size
        __UpperCAmelCase =num_channels
        __UpperCAmelCase =embeddings_size
        __UpperCAmelCase =hidden_sizes
        __UpperCAmelCase =depths
        __UpperCAmelCase =is_training
        __UpperCAmelCase =use_labels
        __UpperCAmelCase =hidden_act
        __UpperCAmelCase =num_labels
        __UpperCAmelCase =scope
        __UpperCAmelCase =len(UpperCAmelCase)

    def A__ (self):
        """Build random pixel values (and labels, if enabled) plus a config."""
        __UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __UpperCAmelCase =None
        if self.use_labels:
            __UpperCAmelCase =ids_tensor([self.batch_size] , self.num_labels)
        __UpperCAmelCase =self.get_config()
        return config, pixel_values, labels

    def A__ (self):
        """Build a RegNetConfig from the stored hyper-parameters."""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
        """Check the base model's last hidden state shape (B, C, H//32, W//32)."""
        __UpperCAmelCase =TFRegNetModel(config=UpperCAmelCase)
        __UpperCAmelCase =model(UpperCAmelCase , training=UpperCAmelCase)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
        """Check the classification head's logits shape (B, num_labels)."""
        __UpperCAmelCase =self.num_labels
        __UpperCAmelCase =TFRegNetForImageClassification(UpperCAmelCase)
        __UpperCAmelCase =model(UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def A__ (self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        __UpperCAmelCase =self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
        __UpperCAmelCase ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common model-test suite for TFRegNet.

    NOTE(review): the base-class list names ``_lowerCAmelCase`` twice
    (presumably TFModelTesterMixin and PipelineTesterMixin before a
    mechanical rename), and the class attributes are all named ``a_`` while
    the framework reads them under other names — confirm against upstream.
    """

    # Model classes / pipeline mapping under test (empty when TF is unavailable).
    a_ : Any = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    a_ : Union[str, Any] = (
        {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # Feature flags for the common tests (resizing/head-masking/onnx etc. disabled).
    a_ : str = False
    a_ : List[str] = False
    a_ : int = False
    a_ : List[str] = False
    a_ : List[Any] = False

    def A__ (self):
        """Set up the model tester and config tester."""
        __UpperCAmelCase =TFRegNetModelTester(self)
        __UpperCAmelCase =ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase)

    def A__ (self):
        """Intentionally a no-op (attention-output tests do not apply to RegNet)."""
        return

    @unittest.skip(reason='''RegNet does not use inputs_embeds''')
    def A__ (self):
        """Skipped: RegNet consumes pixel values, not input embeddings."""
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
    @slow
    def A__ (self):
        """Run the common keras-fit test (GPU only; grouped convs lack CPU backprop)."""
        super().test_keras_fit()

    @unittest.skip(reason='''RegNet does not support input and output embeddings''')
    def A__ (self):
        """Skipped: RegNet has no token embedding matrices."""
        pass

    def A__ (self):
        """Check the forward signature starts with `pixel_values`."""
        __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase =model_class(UpperCAmelCase)
            __UpperCAmelCase =inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCAmelCase =[*signature.parameters.keys()]
            __UpperCAmelCase =['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase)

    def A__ (self):
        """Run the base-model shape check from the model tester."""
        __UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase)

    def A__ (self):
        """Check the number and spatial size of the reported hidden states for
        both layer types, via output_hidden_states arg and via config."""
        def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
            __UpperCAmelCase =model_class(UpperCAmelCase)
            __UpperCAmelCase =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase) , training=UpperCAmelCase)
            __UpperCAmelCase =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __UpperCAmelCase =self.model_tester.num_stages
            self.assertEqual(len(UpperCAmelCase) , expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )

        __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase =['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                __UpperCAmelCase =layer_type
                __UpperCAmelCase =True
                check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                __UpperCAmelCase =True
                check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)

    def A__ (self):
        """Check that tuple and dict return paths produce identical outputs,
        with and without labels / output_hidden_states."""
        __UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase={}):
            __UpperCAmelCase =model(UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase)
            __UpperCAmelCase =model(UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase).to_tuple()

            def recursive_check(UpperCAmelCase , UpperCAmelCase):
                if isinstance(UpperCAmelCase , (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase):
                        recursive_check(UpperCAmelCase , UpperCAmelCase)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(UpperCAmelCase , UpperCAmelCase)) , msg=(
                            '''Tuple and dict output are not equal. Difference:'''
                            f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"""
                        ) , )

            recursive_check(UpperCAmelCase , UpperCAmelCase)

        for model_class in self.all_model_classes:
            __UpperCAmelCase =model_class(UpperCAmelCase)
            __UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)
            __UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)
            check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
            __UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase)
            __UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase)
            check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
            __UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)
            __UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)
            check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'''output_hidden_states''': True})
            __UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase)
            __UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase)
            check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'''output_hidden_states''': True})

    def A__ (self):
        """Run the classification-head shape check from the model tester."""
        __UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase)

    @slow
    def A__ (self):
        """Check that the published checkpoint loads."""
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase =TFRegNetModel.from_pretrained(UpperCAmelCase)
            self.assertIsNotNone(UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
    """Load the standard COCO cats test-fixture image used by the slow tests."""
    fixture_path = './tests/fixtures/tests_samples/COCO/000000039769.png'
    return Image.open(fixture_path)
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: run the pretrained TFRegNet classifier on a real
    image and compare logits against reference values.

    Bug fixes: the property must be named ``default_image_processor`` (it is
    read as ``self.default_image_processor`` below); locals in the test were
    all bound to one obfuscated name and undefined at their use sites; the
    undefined ``training=UpperCAmelCase`` is replaced with ``training=False``.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor for the first published checkpoint (None without vision)."""
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 142 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **metrics_kwargs):
    """Compute ROUGE between two line-aligned text files.

    Args:
        pred_path: file with one prediction per line.
        tgt_path: file with one reference per line (truncated to len(preds)).
        save_path: optional path; when given, metrics are dumped there as JSON.
        **metrics_kwargs: forwarded to `calculate_rouge`.

    Returns:
        The metrics dict produced by `calculate_rouge`.
    """
    # Use context managers so the file handles are always closed.
    with open(pred_path) as f:
        pred_lns = [x.strip() for x in f.readlines()]
    with open(tgt_path) as f:
        tgt_lns = [x.strip() for x in f.readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **metrics_kwargs)
    if save_path is not None:
        # NOTE(review): `indent` was mangled by obfuscation; None (compact JSON)
        # matches the upstream script — confirm if pretty-printing is wanted.
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
    # Expose the ROUGE computation as a command-line interface via python-fire.
    fire.Fire(calculate_rouge_path)
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase =logging.get_logger(__name__)
def rename_keys(state_dict):
    """Map original GLPN checkpoint keys to the HuggingFace GLPN naming scheme.

    Returns a new OrderedDict; `state_dict` itself is not modified.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value ('kv') projection into separate key and value weights.

    Mutates `state_dict` in place: pops the fused `kv` entries and inserts
    `key`/`value` entries whose halves come from the fused matrix.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (a single fused matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # first hidden_sizes[i] rows -> key projection
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            # remaining rows -> value projection
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so the raw response object can be handed to PIL directly.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint to the HuggingFace format.

    Args:
        checkpoint_path: path to the original .pth checkpoint.
        pytorch_dump_folder_path: where to place the converted model when pushing.
        push_to_hub: whether to upload model + processor to the hub.
        model_name: released variant name ("...nyu" / "...kitti"); enables output verification.
    """
    # GLPN-NYU and GLPN-KITTI share the same architecture hyper-parameters.
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output against known-good slices for the released checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 285 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig from the naming convention of the original checkpoints.

    The variant size ("tiny"/"small"/"base"/"large"/"xlarge"/"huge") and the
    receptive-field tags ("lrf", "fl3", "fl4") embedded in `model_name` select
    depths, embedding dim, focal levels/windows and the label set.
    """
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    # The large/huge checkpoints use conv embeddings, post-layernorm and layerscale.
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    """Translate an original FocalNet state-dict key into the HF naming scheme."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    # the classification head keeps its own prefix; everything else lives under "focalnet."
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original FocalNet checkpoint, convert it to HF format and verify it.

    Args:
        model_name: one of the keys of `model_name_to_url` below.
        pytorch_dump_folder_path: optional output directory for the converted model.
        push_to_hub: whether to upload model + processor to the hub.
    """
    # mapping from model name to URL of the original checkpoint
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    # replicate the processor's preprocessing with torchvision to cross-check it
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    # NOTE(review): only the tiny/small/base variants have reference slices;
    # for large/huge models `expected_slice` is undefined and the assert will fail.
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 330 | '''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    """Count the number of ways to assign distinct tasks to persons.

    Bitmask dynamic programming over persons: dp[mask][task_no] is the number
    of ways to give every still-unserved person a distinct task using tasks
    task_no..N, where bit p of `mask` is set when person p already has a task.
    """

    def __init__(self, task_performed, total):
        # total no of tasks (N)
        self.total_tasks = total

        # DP table of dimension (2^M) x (N+1); -1 marks "not yet computed"
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask has all M person-bits set to 1: every person got a task
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Recursive memoised count of complete assignments from (mask, task_no)."""
        # if mask == self.final_mask all persons were distributed a task, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone got a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case was already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the task one by one to all possible persons and recursively
        # count for the remaining tasks
        if task_no in self.task:
            for p in self.task[task_no]:
                # if person p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and recurse with the updated mask
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Return the number of complete assignments for `task_performed`,
        where task_performed[p] lists the tasks person p can do."""
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # fill the DP table; the final answer starts from mask 0 and task 1
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    # Demo: 5 tasks, 3 persons; prints the number of valid distinct assignments.
    _A : Optional[int] = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    _A : Union[str, Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 330 | 1 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase_ : int = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    """Return True when `source` is within 1% of `target` (relative to `target`)."""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    """Run `datasets-cli test` on a fixture dataset script and check the generated infos.

    NOTE(review): the parameter name must match the pytest fixture providing the
    dataset script directory — confirm against the repository's conftest.
    """
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()

    # The command is expected to write a README.md with the dataset infos.
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)

    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # byte counts may drift slightly between library versions; allow 1%
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # the original had a bare `result == expected` no-op; assert it for real
            assert result == expected
| 115 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
# Import the ControlNet pipelines only when torch + transformers are installed;
# otherwise fall back to the dummy objects that raise a helpful error on use.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

# The Flax pipeline additionally requires JAX/Flax to be available.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 601 | 0 |
def A(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase when `use_pascal`).

    >>> A("some_random_string")
    'someRandomString'
    >>> A("some_random_string", use_pascal=True)
    'SomeRandomString'

    Raises:
        ValueError: if `input_str` is not a str or `use_pascal` is not a bool.
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    # camelCase keeps the first word lower-cased; PascalCase capitalizes all words
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    # NOTE: an empty segment (e.g. "a__b") raises IndexError, matching the original
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
    # Run the doctest examples embedded in this module.
    from doctest import testmod
    testmod()
| 639 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Base URL under which the original OpenAI Jukebox checkpoints are hosted.
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
# Checkpoint files (shared VQ-VAE + three priors) required per model variant.
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    """Map a single (partially renamed) Jukebox checkpoint key to the HF name."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename all keys of `state_dict` to match the HF Jukebox model layout.

    Args:
        state_dict: original (pre-renamed) checkpoint dict.
        model_state_dict: target HF model's state dict, used to validate key names/shapes.
        key_prefix: submodule prefix in the full model ("vqvae" or "priors.N").
        mapping: dict updated in place with new_key -> original_key.

    Returns:
        A new dict with renamed keys (without the `key_prefix`).
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original Jukebox checkpoints, convert them and save an HF model.

    Returns the list of converted prior state dicts (the VQ-VAE dict is popped
    and loaded into the model first).
    """
    # download the original checkpoints into the dump folder if not cached yet
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                # NOTE(review): the key transform for conditioner weights was lost in
                # this file; ".blocks." -> ".model." matches the upstream script — confirm.
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # first checkpoint is the VQ-VAE, the rest are priors (stored top-level-first)
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Default per-device batch sizes used by the GLUE/MRPC example below.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC, tokenized for `model_name_or_path`.

    Args:
        accelerator: the `Accelerator` (used to pick TPU-friendly padding).
        batch_size: per-device batch size for both splits.
        model_name_or_path: tokenizer checkpoint to load.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run evaluation over `eval_dataloader` and return the accuracy.

    De-duplicates the last gathered batch in distributed settings so each sample
    is counted exactly once.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                # Drop the padded duplicates appended to fill the final batch.
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    """Train BERT on GLUE/MRPC, checkpointing (and optionally resuming) each epoch.

    Works both with a plain optimizer/scheduler and with DeepSpeed's dummy
    optimizer/scheduler when those are configured in the DeepSpeed plugin.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: use the real AdamW unless DeepSpeed supplies its own.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler: real linear warmup unless DeepSpeed supplies its own.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Recover the epoch number from the checkpoint folder name ("epoch_<n>...").
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        # Sanity-check the restored state against what we saved for that epoch.
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        # Save a checkpoint plus a small JSON of metrics for this epoch.
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    """Parse CLI arguments and launch `training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
# Script entry point: parse CLI arguments and run training when executed directly.
if __name__ == "__main__":
    main()
| 644 | """simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Map fairseq parameter-name fragments (keys) to HF Wav2Vec2 parameter names (values).
# A "*" in the value is replaced by the encoder layer index during conversion.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Parameters that live at the top level of the HF model rather than inside the encoder.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which tensor of the resolved module to overwrite
    ("weight", "weight_g", "weight_v", "bias" or None for the object itself);
    `full_name` is only used for shape-check messages and logging.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy every weight of the fairseq wav2vec2 encoder into the HF model.

    Conv feature-extractor and adapter weights are routed to dedicated loaders;
    everything else is matched through the module-level ``MAPPING`` table.
    Unmatched parameters are collected and reported.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index for the wildcard.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor into the HF model.

    The fairseq name encodes ``conv_layers.<layer_id>.<type_id>...``: type 0 is
    the conv itself, type 2 a layer norm (only present per-layer without group
    norm, or on layer 0 with it). Anything else is recorded as unused.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    """Load one adapter tensor (projection, its layer norm, or a conv layer).

    Names containing "adaptor" address per-layer convs; the projection and its
    layer norm come from the w2v encoder head. Unmatched names are recorded.
    """
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    # Conv-layer names carry their index in the second component.
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Return a bias-free nn.Linear whose weight tensor is shared with `emb`.

    Used to tie an output projection to an embedding matrix; the Linear's
    in/out sizes are taken from the embedding's weight shape and the weight
    data is rebound (not copied).
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_stride,
    adapter_kernel_size,
    encoder_output_dim,
    start_token_id,
):
    """Convert a fairseq wav2vec2+mBART seq2seq checkpoint to a HF
    SpeechEncoderDecoderModel and save model, tokenizer and feature extractor
    to `pytorch_dump_folder_path`.
    """
    # Load the encoder/decoder configurations.
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=True,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # Load the fairseq model.
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()
    # Load the feature extractor.
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # Set weights for the wav2vec2 encoder.
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # Load decoder weights.
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    # Patch the combined config with tokenizer-dependent special token ids.
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=10_24, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=25_00_04, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
| 644 | 1 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    """Parse the CLI options for the image-generation script and return them."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    """Paste `rows * cols` equally-sized PIL images into a single grid image.

    Raises:
        ValueError: if the number of images does not equal ``rows * cols``.
    """
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    # All images are assumed to share the size of the first one — TODO confirm.
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the Stable Diffusion pipeline and return (grid_image, images).

    A fixed-seed generator makes the output reproducible; the individual
    images are also arranged into a roughly square grid.
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker (always report images as safe).
pipeline.safety_checker = lambda images, clip_input: (images, False)

# Prefer an INT8/quantized UNet produced by neural-compressor if one exists.
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

# Generate the images, save the grid and each individual image.
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 2 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config URLs.
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __A (PretrainedConfig):
    """Configuration class for the M-CTC-T speech recognition model.

    Stores the architecture hyper-parameters (transformer sizes, convolutional
    front-end layout, CTC loss options) and validates that the convolutional
    kernel list matches the declared number of conv layers.
    """

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8_065,
        hidden_size=1_536,
        num_hidden_layers=36,
        intermediate_size=6_144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 2 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge; for 0-1 BFS the weight must be 0 or 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Directed graph stored as adjacency lists, with 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the outgoing edges of `vertex`."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        # Number of vertices in the graph.
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        """Add a directed edge; weight must be 0 or 1 and the target in range.

        Raises:
            ValueError: on an invalid weight or an out-of-range target vertex.
        """
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """Return the 0-1 BFS shortest distance from start to finish.

        Zero-weight edges push their target to the front of the deque so that
        vertices are settled in non-decreasing distance order.

        Raises:
            ValueError: if `finish_vertex` is unreachable.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip if we already know an equal-or-better distance.
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
if __name__ == "__main__":
    # Run the doctest examples embedded in this module.
    import doctest

    doctest.testmod()
| 375 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Any = False, False, False
@dataclass
class Audio:
    """Audio feature: stores a ``{"bytes", "path"}`` pair and decodes it to an
    array on access.

    Attributes:
        sampling_rate: target sampling rate; when set, audio is resampled on
            decode.
        mono: down-mix multi-channel audio to mono on decode.
        decode: whether accessing an example decodes it to
            ``{"path", "array", "sampling_rate"}``; when False the raw
            bytes/path dict is returned.
    """

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        # Arrow storage type used for this feature in tables.
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode an example (path, raw bytes, or dict) into the storage format."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    # int16 PCM is rescaled to float32 in [-1, 1].
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32_767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32_767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        """Decode a stored example into ``{"path", "array", "sampling_rate"}``.

        `token_per_repo_id` maps Hub repo ids to auth tokens for remote files.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')

        if file is None:
            # Remote file: resolve a per-repo auth token from the path.
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten to raw ``bytes``/``path`` columns; only legal when not decodable."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """Cast string/binary/struct Arrow arrays to the Audio struct storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            # Raw in-memory audio arrays: re-encode each example to wav bytes.
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the file content referenced by ``path`` into ``bytes``."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 375 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Train an LSTM to predict the next `forward_days` values of a time
    # series from the previous `look_back` values.
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    # The last `periods * look_back` rows are held out for testing.
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Build sliding windows: each input window of `look_back` steps maps to
    # the following `forward_days` steps.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 721 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """Minimal KwargsHandler used by the tests below to exercise `to_kwargs`."""

    a: int = 0
    b: bool = False
    c: float = 3.0
class __UpperCamelCase (unittest.TestCase ):
    """Unit tests for accelerate's kwargs-handler dataclasses."""

    def test_kwargs_handler(self):
        # `to_kwargs` only reports fields whose value differs from the default.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Custom scaler kwargs must reach the GradScaler built by Accelerator.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-run this file under torchrun so the `__main__` block below
        # exercises DistributedDataParallelKwargs on multiple GPUs.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by `test_ddp_kwargs` above: verify that custom
    # DDP kwargs are applied and the remaining ones keep their defaults.
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 184 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the NLLB tokenizers; entries are only added when
# their optional backend (sentencepiece / tokenizers) is installed.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so submodules are only imported
    # when first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 | '''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Create and save a basic cluster config for the local machine.

    Args:
        mixed_precision: one of "no", "fp16", "bf16" or "fp8".
        save_location: where to write the JSON config file.
        use_xpu: whether to prefer XPU devices when CUDA is unavailable.

    Returns:
        The path to the written config, or False when a config already exists
        (an existing file is never overwritten).
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.")
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}")
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    # Pick the distributed type from whatever accelerator hardware is present.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        # CPU-only fallback.
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    """Attach the `accelerate config default` subcommand to `parser`."""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    """Entry point for `accelerate config default`: write the basic config."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 370 | 1 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the Euler gamma function at `num` by numerical integration of
    its defining integral on [0, inf).

    Raises:
        ValueError: if `num` is not strictly positive (the integral diverges).
    """
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    """Integrand x**(z-1) * exp(-x) of the gamma function."""
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
    # Run any doctest examples in this module.
    from doctest import testmod

    testmod()
| 707 |
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """Return True when `string` has no repeated letters (case-insensitive).

    The empty string is considered an isogram.

    Raises:
        ValueError: if `string` contains non-alphabetic characters.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    # After lowercasing, duplicates collapse in the set, shrinking its size.
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    # Simple CLI driver: read a string and report whether it is an isogram.
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 418 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Architecture hyper-parameters of the published RWKV checkpoints, keyed by
# model size. Referenced by the converter below.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# (name kept as referenced by the converter, including the "HIDEN" spelling)
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    """Rename the keys of a raw RWKV state dict to the HF `transformers` names.

    The dict is modified in place (keys popped and re-inserted) and returned.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_key and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # Everything except the LM head lives under the `rwkv.` prefix.
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint from the Hub, convert it to the HF layout,
    shard it into `output_dir`, and optionally push the result to the Hub.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        # Round-trip through CPU clones so the files shrink to their real size.
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
tokenizer.push_to_hub(__A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 486 | import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A (ProcessorMixin ):
    """FLAVA processor: wraps a FLAVA image processor and a BERT tokenizer into
    a single callable that prepares text and/or image model inputs.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the deprecated `feature_extractor` kwarg as a fallback.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Tokenize `text` and/or preprocess `images`; when both are given the
        image features are merged into the text encoding.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 486 | 1 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# by_feature example scripts that are not diffed against the complete
# examples (they either need trackers, external deps, or a special launcher).
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
class snake_case (unittest.TestCase ):
    """Checks that every `examples/by_feature/*.py` script stays in sync with
    the corresponding complete example script.
    """

    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        """Diff each by_feature script against `complete_file_name`.

        `parser_only` selects which section is compared (`main()` vs
        `training_function()`); `special_strings` are removed from the diff
        before asserting it is empty.
        """
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        # Lines the cv examples legitimately differ on (tracker logging etc.).
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ ,{"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class snake_case ( __snake_case ):
    """End-to-end launches of the ``examples/by_feature`` scripts via
    ``accelerate launch`` using a shared temporary config file.

    NOTE(review): names look machine-mangled — every method is called
    ``snake_case__`` (later definitions shadow earlier ones, so unittest would
    only ever see the last), ``setUpClass`` assigns all class attributes to a
    local ``__lowercase`` while later code reads ``cls._tmpdir`` /
    ``cls.configPath`` / ``self._launch_args`` / ``testargs`` /
    ``num_processes`` that are never bound, and the base class ``__snake_case``
    is undefined here.  Restore the original names (cf. accelerate's
    ``test_examples.py``) before running.
    """
    __lowerCAmelCase = False
    @classmethod
    def snake_case__ ( cls ):
        # One temp dir + accelerate config shared by all tests in the class.
        super().setUpClass()
        __lowercase = tempfile.mkdtemp()
        __lowercase = os.path.join(cls._tmpdir , "default_config.yml" )
        write_basic_config(save_location=cls.configPath )
        __lowercase = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def snake_case__ ( cls ):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )
    def snake_case__ ( self ):
        # Checkpointing once per epoch should create epoch_0.
        __lowercase = f'''
            examples/by_feature/checkpointing.py
            --checkpointing_steps epoch
            --output_dir {self.tmpdir}
            '''.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
    def snake_case__ ( self ):
        # Checkpointing every step should create step_2.
        __lowercase = f'''
            examples/by_feature/checkpointing.py
            --checkpointing_steps 1
            --output_dir {self.tmpdir}
            '''.split()
        __lowercase = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
    def snake_case__ ( self ):
        # Resuming from an epoch checkpoint must skip epoch 0.
        __lowercase = f'''
            examples/by_feature/checkpointing.py
            --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
            '''.split()
        __lowercase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase_ )
        self.assertNotIn("epoch 0:" , lowerCAmelCase_ )
        self.assertIn("epoch 1:" , lowerCAmelCase_ )
    def snake_case__ ( self ):
        # Resuming from a step checkpoint: expected output depends on the
        # number of visible CUDA devices.
        __lowercase = f'''
            examples/by_feature/checkpointing.py
            --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
            '''.split()
        __lowercase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase_ )
        if torch.cuda.is_available():
            __lowercase = torch.cuda.device_count()
        else:
            __lowercase = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:" , lowerCAmelCase_ )
            self.assertIn("epoch 1:" , lowerCAmelCase_ )
        else:
            self.assertIn("epoch 0:" , lowerCAmelCase_ )
            self.assertIn("epoch 1:" , lowerCAmelCase_ )
    @slow
    def snake_case__ ( self ):
        # Cross-validation on real (non-mocked) dataloaders; parse the last
        # printed metrics dict and require >= 0.75 accuracy.
        __lowercase = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
        with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
            __lowercase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase_ )
            __lowercase = re.findall("({.+})" , lowerCAmelCase_ )
            __lowercase = [r for r in results if "accuracy" in r][-1]
            __lowercase = ast.literal_eval(lowerCAmelCase_ )
            self.assertGreaterEqual(results["accuracy"] , 0.75 )
    def snake_case__ ( self ):
        __lowercase = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs )
    @require_trackers
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def snake_case__ ( self ):
        # Tracking run writes into <tmpdir>/tracking.
        with tempfile.TemporaryDirectory() as tmpdir:
            __lowercase = f'''
                examples/by_feature/tracking.py
                --with_tracking
                --project_dir {tmpdir}
                '''.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "tracking" ) ) )
    def snake_case__ ( self ):
        __lowercase = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs )
    def snake_case__ ( self ):
        __lowercase = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs )
def solution() -> int:
    """Project Euler problem 19: count the Sundays that fell on the first of a
    month during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    The loop starts on 6 Jan 1901 (the century's first Sunday) and jumps one
    week at a time; whenever the running day-of-month equals 1, the 1st of
    that month was a Sunday.

    Returns:
        int: the number of month-firsts that were Sundays (known answer: 171).
    """
    # Fixes applied (review): all locals had been collapsed to one mangled
    # name (`__lowercase`), the return annotation was `List[Any]` for an int
    # result, and the __main__ guard called an undefined `solution`.
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was a Sunday
    month = 1
    year = 1_901
    sundays = 0

    while year < 2_001:
        day += 7  # advance exactly one week

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2_001 and day == 1:
            sundays += 1
    return sundays


# Backward-compatible alias for the previously mangled name.
__lowercase = solution

if __name__ == "__main__":
    print(solution())
| 576 | 1 |
# Deprecation shim: importing this module re-exports the pipeline from
# `diffusers` and emits a warning pointing callers at the direct import.
# NOTE(review): the imported symbol `StableDiffusionImgaImgPipeline` looks
# mangled — the warning text itself says `StableDiffusionImg2ImgPipeline`;
# confirm the correct class name against the installed diffusers version.
import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401

warnings.warn(
    'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
    ' StableDiffusionImg2ImgPipeline` instead.'
)
| 73 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __lowerCAmelCase ( __a ):
    """Output container for the DeepFloyd-IF pipelines.

    NOTE(review): all three fields share the name ``snake_case`` (only one
    annotation survives) and the base class ``__a`` is undefined here — the
    names look mangled.  Upstream, the fields are presumably the generated
    images plus per-image nsfw/watermark detection flags — confirm against the
    original ``IFPipelineOutput``.
    """
    snake_case : Union[List[PIL.Image.Image], np.ndarray]
    snake_case : Optional[List[bool]]
    snake_case : Optional[List[bool]]


# Import the concrete pipelines only when both torch and transformers are
# installed; otherwise fall back to the dummy placeholder objects.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 414 | 0 |
"""simple docstring"""
import argparse
import struct
import unittest
class UpperCamelCase__:
    """Pure-Python SHA-256 implementation (FIPS 180-4).

    The digest is computed eagerly in ``__init__`` and exposed as the ``hash``
    attribute — a 64-character lowercase hex string, matching
    ``hashlib.sha256(data).hexdigest()``.

    Fixes applied (review): all three helper methods had been collapsed to one
    mangled name (``snake_case__``), so ``__init__``'s calls to
    ``self.preprocessing`` / ``self.final_hash`` raised AttributeError, and
    every local in the compression loop was bound to ``A__``, destroying the
    working variables a..h and temp values.  Names restored per the standard
    algorithm.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes.
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad ``data`` to a multiple of 64 bytes per the SHA-2 spec.

        Appends the 0x80 terminator, enough zero bytes, and the original bit
        length as a big-endian 64-bit integer.
        """
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the SHA-256 compression function over every 64-byte block and
        store the hex digest in ``self.hash``."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # The message schedule: 16 words from the block, extended to 64.
            words = list(struct.unpack(">16L", block))
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # Extend the schedule into the zero-initialised slots.
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression round (all arithmetic is mod 2**32).
                sum_1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp_1 = (
                    h + sum_1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                sum_0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp_2 = (sum_0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp_1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp_1 + temp_2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Fold this block's result into the running hash state.
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit ``value`` by ``rotations`` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class UpperCamelCase__ ( unittest.TestCase ):
    """Sanity test comparing the pure-Python digest with hashlib.

    NOTE(review): this class re-uses the name ``UpperCamelCase__`` and so
    shadows the SHA-256 class defined above, and the body references
    ``SHAaaa`` and ``hashlib.shaaaa`` which do not exist — presumably mangled
    forms of the SHA-256 class and ``hashlib.sha256``.  Restore the original
    names before running.
    """
    def snake_case__ ( self ) -> None:
        import hashlib

        A__ = bytes("Test String" , "utf-8" )
        self.assertEqual(SHAaaa(A__ ).hash , hashlib.shaaaa(A__ ).hexdigest() )
def _lowerCamelCase ( ) -> None:
    """CLI entry point: run the doctests, then hash either ``--string`` or the
    contents of ``--file`` and print the hex digest.

    NOTE(review): locals look mangled — everything is bound to ``A__`` while
    the body reads ``parser`` and ``args`` which are never assigned, and
    ``UpperCAmelCase_`` / ``SHAaaa`` are undefined (presumably the input bytes
    and the SHA-256 class).  The ``__main__`` guard below also calls an
    undefined ``main`` instead of this function.  Restore the original names
    before running.
    """
    import doctest

    doctest.testmod()
    A__ = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file" )
    A__ = parser.parse_args()
    A__ = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb" ) as f:
            A__ = f.read()
    else:
        A__ = bytes(UpperCAmelCase_, "utf-8" )
    print(SHAaaa(UpperCAmelCase_ ).hash )


if __name__ == "__main__":
    main()
| 716 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__(ProcessorMixin):
    r"""FLAVA processor: wraps a FLAVA image processor and a BERT tokenizer
    into a single processor object.

    Args:
        image_processor: the image processor instance (required).
        tokenizer: a ``BertTokenizer``/``BertTokenizerFast`` instance (required).

    Fixes applied (review): the class previously inherited from the undefined
    name ``_lowerCAmelCase`` instead of ``ProcessorMixin``; ``__init__`` and
    ``__call__`` declared the same mangled parameter name repeatedly (a
    SyntaxError); the ``FutureWarning`` category in the deprecation warnings
    had been mangled into an undefined name; and the tokenizer/image outputs
    were both bound to one collapsed local so ``encoding`` was never defined.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backwards compatibility: `feature_extractor` was renamed to
            # `image_processor`.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=False,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_image_mask=None,
        return_codebook_pixels=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize ``text`` and/or preprocess ``images``.

        At least one of ``text`` / ``images`` must be provided.  Returns a
        ``BatchEncoding`` holding the tokenizer outputs, the image-processor
        outputs, or both merged together.

        Raises:
            ValueError: if both ``text`` and ``images`` are ``None``.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            # Merge image features into the tokenizer encoding.
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            # Images only: wrap the image features in a BatchEncoding.
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # De-duplicated union of tokenizer and image-processor input names,
        # preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 562 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

# Fixes applied (review): all five module-level constants were bound to one
# mangled name (each assignment clobbered the previous one) while the class
# below referenced VOCAB_FILES_NAMES etc.; the methods declared duplicate
# parameter names (a SyntaxError) and shared one method name. Names restored
# per the standard BERT-style fast-tokenizer layout.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class snake_case_(PreTrainedTokenizerFast):
    r"""Fast ConvBERT tokenizer (backed by HuggingFace *tokenizers*), based on
    WordPiece — behaviourally identical to the fast BERT tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the
        # requested casing / accent-stripping / Chinese-character handling.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token-type ids: zeros for the first sequence, ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the WordPiece vocabulary; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
    """Unit test for ``tf_top_k_top_p_filtering``.

    NOTE(review): locals look mangled — every assignment binds ``a_`` while the
    tail of the method reads ``output`` (the filtered logits), so the original
    distinct local names need restoring before this can run.
    """
    def snake_case_ ( self ):
        # Two rows of logits whose top-5 values are annotated inline; the
        # filter below keeps top_k=10 / top_p=0.6 with at least 4 tokens.
        a_ : str = tf.convert_to_tensor(
            [
                [
                    8.2_220_991, # 3rd highest value; idx. 0
                    -0.5_620_044,
                    5.23_229_752,
                    4.0_386_393,
                    -6.8_798_378,
                    -0.54_785_802,
                    -3.2_012_153,
                    2.92_777_176,
                    1.88_171_953,
                    7.35_341_276, # 5th highest value; idx. 9
                    8.43_207_833, # 2nd highest value; idx. 10
                    -9.85_711_836,
                    -5.96_209_236,
                    -1.13_039_161,
                    -7.1_115_294,
                    -0.8_369_633,
                    -5.3_186_408,
                    7.06_427_407,
                    0.81_369_344,
                    -0.82_023_817,
                    -5.9_179_796,
                    0.58_813_443,
                    -6.99_778_438,
                    4.71_551_189,
                    -0.18_771_637,
                    7.44_020_759, # 4th highest value; idx. 25
                    9.38_450_987, # 1st highest value; idx. 26
                    2.12_662_941,
                    -9.32_562_038,
                    2.35_652_522,
                ], # cummulative prob of 5 highest values <= 0.6
                [
                    0.58_425_518,
                    4.53_139_238,
                    -5.57_510_464,
                    -6.28_030_699,
                    -7.19_529_503,
                    -4.02_122_551,
                    1.39_337_037,
                    -6.06_707_057,
                    1.59_480_517,
                    -9.643_119,
                    0.03_907_799,
                    0.67_231_762,
                    -8.88_206_726,
                    6.27_115_922, # 4th highest value; idx. 13
                    2.28_520_723,
                    4.82_767_506,
                    4.30_421_368,
                    8.8_275_313, # 2nd highest value; idx. 17
                    5.44_029_958, # 5th highest value; idx. 18
                    -4.4_735_794,
                    7.38_579_536, # 3rd highest value; idx. 20
                    -2.91_051_663,
                    2.61_946_077,
                    -2.5_674_762,
                    -9.48_959_302,
                    -4.02_922_645,
                    -1.35_416_918,
                    9.67_702_323, # 1st highest value; idx. 27
                    -5.89_478_553,
                    1.85_370_467,
                ], # cummulative prob of 5 highest values <= 0.6
            ] , dtype=tf.floataa , )
        a_ : List[str] = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
        a_ : Optional[int] = tf.convert_to_tensor(
            [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
        a_ : Any = tf_top_k_top_p_filtering(a_ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
        # Compare the surviving (non -inf) values and their indices with the
        # expected tensors above.
        a_ : Dict = output[output != -float("inf" )]
        a_ : str = tf.cast(
            tf.where(tf.not_equal(a_ , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
        tf.debugging.assert_near(a_ , a_ , rtol=1e-12 )
        tf.debugging.assert_equal(a_ , a_ )
@require_tf
class snake_case_ ( unittest.TestCase ,a_ ):
    """TF generation integration tests (SavedModel export, seeded sampling,
    custom-kwargs forwarding) on top of the framework-agnostic mixin.

    NOTE(review): names look mangled throughout — the mixin base is the
    undefined ``a_``, every local is rebound to ``a_`` while bodies read
    ``outputs`` / ``dummy_model`` / ``serving_func`` / ``test_model`` /
    ``generated_tokens`` / ``bart_tokenizer`` / ``bart_model`` etc. that are
    never bound, all test methods share one name (later defs shadow earlier
    ones), and the final line carries pasted table residue (``| 237 | 1 |``).
    Restore the upstream names before running.
    """
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        __lowerCAmelCase = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def snake_case_ ( self ):
        # TF-only test: tf.saved_model export
        a_ : Dict = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        a_ : str = 2
        a_ : int = 2
        class snake_case_ ( tf.Module ):
            def __init__( self , a_ ):
                super(a_ , self ).__init__()
                a_ : List[str] = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
                    tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
                ) , jit_compile=a_ , )
            def snake_case_ ( self , a_ , a_ ):
                a_ : int = self.model.generate(
                    input_ids=a_ , attention_mask=a_ , max_new_tokens=a_ , return_dict_in_generate=a_ , )
                return {"sequences": outputs["sequences"]}
        a_ : Optional[Any] = [[2, 0], [1_0_2, 1_0_3]]
        a_ : List[str] = [[1, 0], [1, 1]]
        a_ : Dict = DummyModel(model=a_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(a_ , a_ , signatures={"serving_default": dummy_model.serving} )
            a_ : List[str] = tf.saved_model.load(a_ ).signatures["serving_default"]
            for batch_size in range(1 , len(a_ ) + 1 ):
                a_ : str = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size] ),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
                }
                a_ : Union[str, Any] = serving_func(**a_ )["sequences"]
                a_ : List[Any] = test_model.generate(**a_ , max_new_tokens=a_ )
                tf.debugging.assert_equal(a_ , a_ )
    @slow
    def snake_case_ ( self ):
        # TF-only test: tf.saved_model export
        a_ : List[Any] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        a_ : int = 1
        a_ : Optional[Any] = 2
        class snake_case_ ( tf.Module ):
            def __init__( self , a_ ):
                super(a_ , self ).__init__()
                a_ : str = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
                    tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
                ) , jit_compile=a_ , )
            def snake_case_ ( self , a_ , a_ ):
                a_ : Tuple = self.model.generate(
                    input_ids=a_ , attention_mask=a_ , max_new_tokens=a_ , return_dict_in_generate=a_ , )
                return {"sequences": outputs["sequences"]}
        a_ : Optional[int] = [[2], [1_0_2, 1_0_3]]
        a_ : Union[str, Any] = [[1], [1, 1]]
        a_ : List[str] = DummyModel(model=a_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(a_ , a_ , signatures={"serving_default": dummy_model.serving} )
            a_ : List[Any] = tf.saved_model.load(a_ ).signatures["serving_default"]
            for input_row in range(len(a_ ) ):
                a_ : Optional[Any] = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]] ),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
                }
                a_ : List[str] = serving_func(**a_ )["sequences"]
                a_ : Tuple = test_model.generate(**a_ , max_new_tokens=a_ )
                tf.debugging.assert_equal(a_ , a_ )
    @slow
    @require_tensorflow_text
    def snake_case_ ( self ):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=a_ )
            class snake_case_ ( tf.keras.layers.Layer ):
                def __init__( self ):
                    super().__init__()
                    a_ : str = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(a_ , "spiece.model" ) , "rb" ).read() )
                    a_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
                def snake_case_ ( self , a_ , *a_ , **a_ ):
                    a_ : Optional[int] = self.tokenizer.tokenize(a_ )
                    a_ , a_ : List[str] = text.pad_model_inputs(
                        a_ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
                    a_ : Union[str, Any] = self.model.generate(input_ids=a_ , attention_mask=a_ )
                    return self.tokenizer.detokenize(a_ )
            a_ : List[str] = CompleteSentenceTransformer()
            a_ : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
            a_ : int = complete_model(a_ )
            a_ : Optional[Any] = tf.keras.Model(a_ , a_ )
            keras_model.save(a_ )
    def snake_case_ ( self ):
        # Has PT equivalent: this test relies on random sampling
        a_ : Tuple = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 1_0,
            "temperature": 0.7,
        }
        a_ : List[Any] = 1_4
        a_ : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        a_ : Any = "Hello, my dog is cute and"
        a_ : Tuple = tokenizer(a_ , return_tensors="tf" )
        a_ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        a_ : Tuple = 6_3_8
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            a_ : List[str] = model.generate(**a_ , eos_token_id=a_ , **a_ )
            self.assertTrue(expectation == len(generated_tokens[0] ) )
        a_ : Any = [6_3_8, 1_9_8]
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            a_ : List[str] = model.generate(**a_ , eos_token_id=a_ , **a_ )
            self.assertTrue(expectation == len(generated_tokens[0] ) )
    def snake_case_ ( self ):
        # Has PT equivalent: ample use of framework-specific code
        a_ : Any = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
        a_ : Any = "Hugging Face is a technology company based in New York and Paris."
        a_ : List[Any] = bart_tokenizer(a_ , return_tensors="tf" ).input_ids
        a_ : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
        a_ : int = bart_model.generate(a_ ).numpy()
        class snake_case_ ( a_ ):
            def snake_case_ ( self , a_ , a_=None , **a_ ):
                return super().call(a_ , **a_ )
        a_ : Union[str, Any] = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
        a_ : List[Any] = bart_model.generate(a_ , foo="bar" ).numpy()
        self.assertTrue(np.array_equal(a_ , a_ ) )
        class snake_case_ ( bart_model.model.encoder.__class__ ):
            def snake_case_ ( self , a_ , **a_ ):
                return super().call(a_ , **a_ )
        a_ : Dict = FakeEncoder(bart_model.config , bart_model.model.shared )
        a_ : Optional[int] = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        a_ : Union[str, Any] = bart_model.generate(a_ ).numpy()
        with self.assertRaises(a_ ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(a_ , foo="bar" ) | 237 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
    """Test suite for ``BloomTokenizerFast`` (fast-only tokenizer).

    NOTE(review): names look mangled — the mixin base ``UpperCAmelCase_`` is
    undefined here, every local is rebound to ``lowercase_`` while bodies read
    names that are never bound (``tokenizer``, ``input_ids``, ``tokenizer_r``,
    ``output_tokens``, ``predicted_text`` equivalents), and the class-attribute
    names on the left of the assignments below were collapsed too.  Restore the
    upstream names before running.
    """
    SCREAMING_SNAKE_CASE_ : List[str] = None
    SCREAMING_SNAKE_CASE_ : Tuple = BloomTokenizerFast
    SCREAMING_SNAKE_CASE_ : List[str] = BloomTokenizerFast
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    SCREAMING_SNAKE_CASE_ : Optional[int] = False
    SCREAMING_SNAKE_CASE_ : Dict = 'tokenizer_file'
    SCREAMING_SNAKE_CASE_ : List[str] = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
    def lowerCamelCase__ ( self ) -> List[Any]:
        # Download the reference tokenizer once and save it into tmpdirname.
        super().setUp()
        lowercase_ : int = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCamelCase__ ( self , **_lowercase ) -> Any:
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
    def lowerCamelCase__ ( self ) -> Any:
        # Round-trip: encode two sentences and check ids and decoded text.
        lowercase_ : Optional[int] = self.get_rust_tokenizer()
        lowercase_ : Any = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        lowercase_ : Dict = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        lowercase_ : List[Any] = tokenizer.batch_encode_plus(_lowercase )['input_ids']
        self.assertListEqual(_lowercase , _lowercase )
        lowercase_ : Optional[Any] = tokenizer.batch_decode(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
    def lowerCamelCase__ ( self , _lowercase=6 ) -> Any:
        # Padding behaviour: encoding with padding enabled must not raise,
        # and must raise once the pad token is removed.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                lowercase_ : int = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                lowercase_ : Dict = 'This is a simple input'
                lowercase_ : List[str] = ['This is a simple input 1', 'This is a simple input 2']
                lowercase_ : List[Any] = ('This is a simple input', 'This is a pair')
                lowercase_ : List[str] = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(_lowercase , max_length=_lowercase )
                    tokenizer_r.encode_plus(_lowercase , max_length=_lowercase )
                    tokenizer_r.batch_encode_plus(_lowercase , max_length=_lowercase )
                    tokenizer_r.encode(_lowercase , max_length=_lowercase )
                    tokenizer_r.batch_encode_plus(_lowercase , max_length=_lowercase )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                lowercase_ : List[Any] = None # Hotfixing padding = None
                self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='max_length' )
                # Simple input
                self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='max_length' )
                # Simple input
                self.assertRaises(
                    _lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='max_length' , )
                # Pair input
                self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='max_length' )
                # Pair input
                self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='max_length' )
                # Pair input
                self.assertRaises(
                    _lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='max_length' , )
    def lowerCamelCase__ ( self ) -> int:
        # Encode/decode round-trip over one XNLI sample in every language.
        lowercase_ : List[str] = self.get_rust_tokenizer()
        lowercase_ : Dict = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_lowercase )
        lowercase_ : Optional[Any] = next(iter(_lowercase ) )['premise'] # pick up one data
        lowercase_ : Any = list(sample_data.values() )
        lowercase_ : int = list(map(tokenizer.encode , _lowercase ) )
        lowercase_ : Optional[Any] = [tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase ) for x in output_tokens]
        self.assertListEqual(_lowercase , _lowercase )
    def lowerCamelCase__ ( self ) -> int:
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 7 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)

# NOTE(review): both module-level assignments bind the same name `A`, so the
# config-archive map below clobbers the logger obtained above; the names look
# mangled (presumably `logger` and `GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP`)
# — confirm against the original file before relying on either binding.
A: int = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__(PretrainedConfig):
    """Configuration class for GPTBigCode models (e.g. santacoder).

    Stores the architecture hyper-parameters; defaults match
    ``bigcode/gpt_bigcode-santacoder``.

    Fixes applied (review): the class previously inherited from the undefined
    name ``UpperCAmelCase_`` instead of ``PretrainedConfig``; its three class
    attributes all shared one mangled name (so only the last survived, and
    ``model_type`` — required by the config machinery — was lost); and
    ``__init__`` declared the same mangled parameter name 19 times, which is a
    SyntaxError.  Parameter names restored per the upstream GPTBigCode config.

    Args:
        vocab_size: size of the token vocabulary.
        n_positions: maximum sequence length.
        n_embd: hidden size.
        n_layer / n_head: transformer depth and attention heads.
        n_inner: inner MLP size (``None`` -> 4 * n_embd by convention).
        activation_function: MLP activation name.
        resid_pdrop / embd_pdrop / attn_pdrop: dropout probabilities.
        layer_norm_epsilon: LayerNorm epsilon.
        initializer_range: std-dev for weight init.
        scale_attn_weights: divide attention weights by sqrt(head_dim).
        use_cache: return past key/values during generation.
        attention_softmax_in_fp32: run the attention softmax in float32.
        scale_attention_softmax_in_fp32: scale that fp32 softmax.
        multi_query: use multi-query attention (single KV head).
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_0257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
# Emit INFO-level progress messages during conversion and create the module logger.
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """Build a DPTConfig (and the expected output shape) for a given original checkpoint URL.

    NOTE(review): the obfuscated original assigned every value to a single clobbered
    local and never populated the config; assignments are restored onto the config
    object per the upstream conversion script. `expected_shape` now has a default so
    non-"large", non-"ade" checkpoints do not hit an UnboundLocalError.
    """
    config = DPTConfig()
    expected_shape = (1, 384, 384)

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop the original classification-head weights from *state_dict* in place.

    Missing keys are ignored (``pop`` with a ``None`` default), so this is safe to
    call on checkpoints that never had a head.
    """
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """Map an original DPT/timm parameter name onto the Hugging Face DPT naming scheme.

    Each substitution is applied in order and may feed the next one (e.g. "scratch"
    becomes "neck" before the refinenet index is parsed).
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection in *state_dict* into separate q/k/v entries, in place.

    For every encoder layer, pops ``...attn.qkv.{weight,bias}`` and writes the three
    hidden-size slices (query, key, value — in that order along dim 0) back under the
    HF attention key names.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download and return the standard COCO cats test image used for output verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url, stream=True).raw)
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Convert an original DPT checkpoint to the HF format, verify its outputs, and save it.

    NOTE(review): the obfuscated original popped every key in the rename loop without
    reinserting it under the new name; the ``state_dict[rename_key(key)] = val``
    write-back is restored per the upstream conversion script.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # NOTE(review): the obfuscated original bound the parser and the parsed args to
    # `__A` while the code below referenced `parser`/`args`; names restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 68 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds tiny random MobileBERT configs/inputs and shape-checks every task head.

    NOTE(review): the obfuscated original duplicated parameter names in ``__init__``
    (a SyntaxError) and gave every method the same name; names are restored from the
    call sites in ``MobileBertModelTest`` below (which instantiates
    ``MobileBertModelTester(self)``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels plus a config; label tensors are None unless use_labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq) for the multiple-choice head.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for MobileBERT.

    NOTE(review): the obfuscated original inherited two undefined names and reused one
    attribute name three times (so only the last survived); bases restored to the
    mixins imported at the top of the file, attribute names to the mixin contract.
    """

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # presumably `fx_compatible` — the original's third (clobbered) attribute was just `True`; TODO confirm
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # Pretraining models additionally need token-level labels and a next-sentence label.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a ``torch.long`` tensor on the active test device from a nested list of token ids.

    NOTE(review): the obfuscated original passed the token list as ``device=`` too;
    ``torch_device`` (imported from ``transformers.testing_utils`` above) is the
    intended device, and the integration test below calls this as ``_long_tensor``.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
# Relative tolerance for the ratio-based slice comparison in the integration test,
# which references this constant as TOLERANCE.
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    """Slow integration test against the released google/mobilebert-uncased weights."""

    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
| 180 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_(TestCase):  # NOTE(review): obfuscated class name kept; base fixed to the imported TestCase
    """Static lint checks over every dataset script under ./datasets."""

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match if *filepath* contains an `open(` call without an
        explicit encoding or binary/write mode keyword, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return a match for a real `print(` call in *filepath* (prints inside
        comments, strings, or docstrings are ignored), else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 643 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : str = DebertaTokenizer
__UpperCamelCase : Optional[int] = True
__UpperCamelCase : Optional[int] = DebertaTokenizerFast
def setUp(self):
    """Write a toy BPE vocab + merges file for the DeBERTa tokenizer tests.

    NOTE(review): the obfuscated original stored the special-tokens map and the
    vocab/merges paths in throwaway locals; they must live on ``self`` because
    ``get_tokenizer`` reads ``self.special_tokens_map`` below.
    """
    super().setUp()

    # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
    vocab = [
        "l",
        "o",
        "w",
        "e",
        "r",
        "s",
        "t",
        "i",
        "d",
        "n",
        "\u0120",
        "\u0120l",
        "\u0120n",
        "\u0120lo",
        "\u0120low",
        "er",
        "\u0120lowest",
        "\u0120newer",
        "\u0120wider",
        "[UNK]",
    ]
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
    self.special_tokens_map = {"unk_token": "[UNK]"}

    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
    self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
    with open(self.vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab_tokens) + "\n")
    with open(self.merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
    """Instantiate the slow tokenizer from the temp dir, merging in the special-tokens map."""
    kwargs.update(self.special_tokens_map)
    return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
    """Return a (raw text, expected decoded text) pair for round-trip tests."""
    input_text = "lower newer"
    output_text = "lower newer"
    return input_text, output_text
def test_full_tokenizer(self):
    """Tokenize a toy string and check both the BPE pieces and their ids (unknown token included)."""
    tokenizer = self.get_tokenizer()
    text = "lower newer"
    bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
    tokens = tokenizer.tokenize(text)
    self.assertListEqual(tokens, bpe_tokens)

    input_tokens = tokens + [tokenizer.unk_token]
    input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
    self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def test_token_type_ids(self):
    """A text pair should yield 0s for the first segment and 1s for the second."""
    tokenizer = self.get_tokenizer()
    tokd = tokenizer("Hello", "World")
    expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
    self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
def test_sequence_builders(self):
    """`build_inputs_with_special_tokens` must match `encode(add_special_tokens=True)`
    for both single sequences and pairs, against the released checkpoint."""
    tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

    text = tokenizer.encode("sequence builders", add_special_tokens=False)
    text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

    encoded_text_from_decode = tokenizer.encode(
        "sequence builders", add_special_tokens=True, add_prefix_space=False
    )
    encoded_pair_from_decode = tokenizer.encode(
        "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
    )

    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

    assert encoded_sentence == encoded_text_from_decode
    assert encoded_pair == encoded_pair_from_decode
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : str = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCamelCase : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']]
# fmt: off
UpperCamelCase : int = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase : List[str] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE )
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 643 | 1 |
'''simple docstring'''
def topological_sort(graph):
    """Print a topological ordering of ``graph`` (Kahn's algorithm), or report a cycle.

    ``graph`` maps each vertex in ``range(len(graph))`` to the list of its
    direct successors.  The original version assigned every intermediate to a
    throwaway placeholder and then read undefined names (``indegree``,
    ``queue``, ``topo``, ``cnt``, ``vertex``), and the driver below called a
    function name that did not exist.
    """
    indegree = [0] * len(graph)
    queue = []  # vertices whose remaining in-degree is zero
    topo = []   # topological order accumulated so far
    cnt = 0     # number of vertices emitted
    for successors in graph.values():
        for node in successors:
            indegree[node] += 1
    for vertex in range(len(graph)):
        if indegree[vertex] == 0:
            queue.append(vertex)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for successor in graph[vertex]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                queue.append(successor)
    if cnt != len(graph):
        # Some vertex never reached in-degree 0, so the graph contains a cycle.
        print("Cycle exists")
    else:
        print(topo)


# Backward-compatible alias for the previous (obfuscated) function name.
__snake_case = topological_sort

# Adjacency List of Graph
a__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(a__)
| 51 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase(SchedulerCommonTest):
    """Unit tests for ``PNDMScheduler``: config sweeps, save/reload round-trips, full loops.

    Repairs in this rewrite: the base class was an undefined alias (must be the
    imported ``SchedulerCommonTest``); both class attributes were bound to the
    same name (second shadowed the first); every method was named ``lowercase_``
    (so only the last survived) while internal calls already used the real
    names ``check_over_configs`` / ``check_over_forward`` / ``full_loop``; and
    several signatures reused one parameter name twice, a SyntaxError.
    """

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a default PNDM config dict, overridable through ``kwargs``."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Instantiate with ``config`` overrides, save/reload, and compare step outputs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (scheduler state used by PRK/PLMS steps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/reload behaviour is already exercised by check_over_configs / check_over_forward
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Step with ``forward_kwargs`` overrides before/after a save/reload and compare outputs."""
        kwargs = dict(self.forward_default_kwargs)
        # merge the per-call overrides; without this the **forward_kwargs
        # passed by the test_* methods would be silently ignored
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a full PRK + PLMS denoising loop with the dummy model and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """Outputs of step_prk/step_plms keep the sample's shape regardless of the timestep."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        # PLMS needs PRK warm-up residuals; stepping without them must raise
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 233 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map from checkpoint identifier to the URL of its hosted config file
# (pretrained-config archive map for ERNIE-M checkpoints).
SCREAMING_SNAKE_CASE__ = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class snake_case(PretrainedConfig):
    r"""Configuration class for an ERNIE-M model.

    Stores the hyper-parameters defining the architecture; all arguments fall
    back to the ernie-m-base defaults.  Repairs: the base class was an
    undefined alias (must be the imported ``PretrainedConfig``); ``__init__``
    reused one parameter name for every argument (a SyntaxError) and rebound a
    single local instead of setting attributes on ``self``, so the config
    carried no values.
    """

    # Names read by the PretrainedConfig machinery; the original bound both
    # values to the same class attribute, so the second shadowed the first.
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 539 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Module logger and the model-type -> feature-extractor-class mapping.  The
# original bound all three values to obfuscated aliases while the code below
# references `logger`, `FEATURE_EXTRACTOR_MAPPING_NAMES` and
# `FEATURE_EXTRACTOR_MAPPING` — all undefined.
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor class object from its class-name string.

    Searches the known model modules first, then dynamically registered
    extractors, then the top-level ``transformers`` module (which exposes a
    dummy object with a helpful error when a dependency is missing).
    Returns ``None`` when nothing matches.  Renamed from the placeholder
    ``lowerCamelCase`` (which collided with the next function) to the name
    already referenced by the class below.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature-extractor configuration (a JSON dict) from a local dir or the Hub.

    Returns an empty dict when no feature-extractor config file can be located
    (the caller is expected to fall back to the model config).  The original
    reused one parameter name for every argument — a SyntaxError — and passed
    the same placeholder for both positional arguments of ``get_file_from_repo``.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class snake_case:
    """AutoFeatureExtractor-style factory: instantiate only through ``from_pretrained``.

    Repairs: both methods were named ``_a`` (the second shadowed the first),
    local names were discarded into placeholders and then read as undefined
    names, and the decorator argument was undefined.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the right feature-extractor class for a checkpoint."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        # mark the request as coming from the Auto API
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class, feature-extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 539 | 1 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]
# Backward-compatible alias for the previous (obfuscated) binding; the loop
# below iterated `pkgs_to_check_at_runtime`, which was otherwise undefined.
A__ = pkgs_to_check_at_runtime

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    """Check that the installed version of ``pkg`` satisfies the pin in ``deps``.

    Delegates to ``require_version``; ``hint`` is an optional extra message on
    failure.  The original signature reused one parameter name twice (a
    SyntaxError) and the body read the undefined name ``pkg``.
    """
    require_version(deps[pkg], hint)
| 13 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
# Module logger: the original bound it to an obfuscated alias while the
# classes below log through the name `logger`.
logger = logging.getLogger(__name__)
class RayRetriever:
    """Ray actor wrapper holding one ``RagRetriever`` replica.

    Renamed from the placeholder ``UpperCamelCase`` (which collided with the
    class below) to the name implied by the remote calls
    ``create_rag_retriever`` / ``init_retrieval`` / ``retrieve``; all methods
    were previously named ``__A`` and state was never stored on ``self``.
    """

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Lazily build the retriever on this worker (index init is deferred)."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        """Load/initialize the index on this worker."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Run retrieval and return (doc_ids, retrieved_doc_embeds)."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    """RAG retriever that distributes retrieval over a pool of Ray actor workers.

    Falls back to in-process retrieval when no workers are supplied.  Repairs:
    the class reused the previous class's placeholder name (shadowing it), the
    base class was an undefined alias (must be the imported ``RagRetriever``),
    and locals/attributes were discarded into placeholders.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Build one retriever replica on each worker.
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        """Initialize the index on every worker, or locally when there are none."""
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve ``n_docs`` docs per query, delegating to a random worker when available."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build a distributed retriever from a pretrained RAG checkpoint and Ray actor handles."""
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 491 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; used by convert_table_transformer_checkpoint below.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    Args:
        state_dict: mutable mapping of parameter name -> tensor.
        old: key to remove.
        new: key to store the popped value under.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with backbone keys moved under the HF conv-encoder prefix.

    Keys containing ``backbone.0.body`` (original torchvision-style naming) are
    rewritten to ``backbone.conv_encoder.model``; all other keys are copied as-is.
    Insertion order of the original mapping is preserved.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split fused attention in-projection matrices into separate q/k/v projections.

    PyTorch's MultiheadAttention stores query/key/value as one fused
    ``in_proj_weight``/``in_proj_bias``; the HF model expects separate
    ``q_proj``/``k_proj``/``v_proj`` parameters. The hidden size is 256, so the
    fused tensors are sliced in thirds of 256 rows. Mutates ``state_dict`` in place.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize a PIL image so its longest side matches the model's expected size.

    Detection checkpoints were trained at max size 800, structure-recognition
    checkpoints at max size 1000; the aspect ratio is preserved.

    Args:
        image: PIL image (anything exposing ``.size`` and ``.resize``).
        checkpoint_url: used only to detect which checkpoint flavor is converted.
    Returns:
        The resized image.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a CHW float tensor and normalize with ImageNet statistics.

    Note: must normalize the *tensor* produced by ``to_tensor``, not the raw
    PIL image.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HuggingFace format.

    Downloads the original state dict, renames/splits its keys to match
    ``TableTransformerForObjectDetection``, verifies the converted model against
    known expected outputs on an example image, and optionally saves the model
    locally and/or pushes it to the hub.

    Args:
        checkpoint_url: URL of the original PubTables-1M checkpoint
            (detection or structure-recognition flavor, detected via the URL).
        pytorch_dump_folder_path: local output folder, or None to skip saving.
        push_to_hub: whether to push model + image processor to the HF hub.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1_000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion against expected logits/boxes on an example image
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]]
        )
        expected_boxes = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]]
        )
        expected_boxes = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point: pick a checkpoint URL, convert it, optionally save/push.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 711 |
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger used by the config classes below.
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    """Configuration for the text encoder of the ALIGN model (a BERT-style tower).

    Defaults reproduce the text tower of ``kakaobrain/align-base``.
    """

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # persist hyper-parameters on the instance so PretrainedConfig
        # serialization (to_dict / from_dict) round-trips them
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping a nested AlignConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of the ALIGN model (EfficientNet-style tower).

    Defaults reproduce the vision tower of ``kakaobrain/align-base``.
    NOTE: the list defaults are kept as literal defaults to preserve the public
    signature; they are never mutated by this class.
    """

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.0_01,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # each repeated block expands to 4 hidden layers
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping a nested AlignConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    """Composite configuration holding the text and vision configs of an ALIGN model.

    Args:
        text_config: dict of kwargs for the text tower (defaults used if None).
        vision_config: dict of kwargs for the vision tower (defaults used if None).
        projection_dim: dimensionality of the shared embedding space.
        temperature_init_value: initial value of the contrastive temperature.
        initializer_range: stddev for weight initialization.
    """

    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build an AlignConfig from already-instantiated text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 539 | 0 |