import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules executed during a forward pass of `module`."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # a "leaf" is a module with no submodules, or a conv/batchnorm layer
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # keep only the modules that actually carry learnable parameters
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
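
# A minimal usage sketch (illustrative only; `toy` is a made-up model):
#
#   toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
#   leaves = Tracker(toy)(torch.randn(1, 3, 32, 32)).parametrized
#   # -> [Conv2d(...), BatchNorm2d(...)] in execution order; the ReLU is
#   #    dropped because it carries no learnable parameters.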
@dataclass
class ModuleTransfer:
    """Transfers the weights of `src` to `dest` by tracing one forward pass through both."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
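
# How the transfer is driven (a sketch; the model names are placeholders):
#
#   transfer = ModuleTransfer(src=original_regnet, dest=our_regnet)
#   transfer(torch.randn(1, 3, 224, 224))  # one forward pass copies all weights
#
# Both models must execute the same parametrized ops in the same order;
# otherwise `raise_if_mismatch` aborts the copy.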
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a classy-vision RegNet so it exposes vissl-style feature blocks."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """Returns the function that instantiates the correct original (timm or vissl) model."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm for any name that was not registered explicitly
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
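
# `convert_name_to_timm` maps our naming scheme onto timm's, e.g.
#   "regnet-y-032" -> "regnety_032"
# so any unregistered name falls back to the matching timm checkpoint.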
class NameToOurModelFuncMap(dict):
    """Returns the correct Hugging Face RegNet class for a given model name."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        # self-supervised SEER checkpoints have no classification head
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # the vissl seer models have no head (we don't use any config files here),
    # so for them we check the last hidden state instead
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can reuse the ConvNeXt image processor
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"),
        "regnet-x-004": ImageNetPreTrainedConfig(depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"),
        "regnet-x-006": ImageNetPreTrainedConfig(depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"),
        "regnet-x-008": ImageNetPreTrainedConfig(depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"),
        "regnet-x-016": ImageNetPreTrainedConfig(depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"),
        "regnet-x-032": ImageNetPreTrainedConfig(depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"),
        "regnet-x-040": ImageNetPreTrainedConfig(depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"),
        "regnet-x-064": ImageNetPreTrainedConfig(depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"),
        "regnet-x-080": ImageNetPreTrainedConfig(depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"),
        "regnet-x-120": ImageNetPreTrainedConfig(depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"),
        "regnet-x-160": ImageNetPreTrainedConfig(depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"),
        "regnet-x-320": ImageNetPreTrainedConfig(depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8),
        "regnet-y-006": ImageNetPreTrainedConfig(depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16),
        "regnet-y-008": ImageNetPreTrainedConfig(depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16),
        "regnet-y-016": ImageNetPreTrainedConfig(depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24),
        "regnet-y-032": ImageNetPreTrainedConfig(depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24),
        "regnet-y-040": ImageNetPreTrainedConfig(depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64),
        "regnet-y-064": ImageNetPreTrainedConfig(depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72),
        "regnet-y-080": ImageNetPreTrainedConfig(depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56),
        "regnet-y-120": ImageNetPreTrainedConfig(depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112),
        "regnet-y-160": ImageNetPreTrainedConfig(depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112),
        "regnet-y-320": ImageNetPreTrainedConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        "regnet-y-2560-seer": RegNetConfig(depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
A_ :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
A_ :List[Any] = parser.parse_args()
A_ :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
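
# Example invocation (illustrative; the script file name is assumed):
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./dump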

# =============================================================================

import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """
    Create rename pairs (HF key, original key) for the patch embedding of stage `idx`.
    """
    embed = []
    for src, dst in (("projection", "proj"), ("normalization", "norm")):
        for param in ("weight", "bias"):
            embed.append(
                (
                    f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{src}.{param}",
                    f"stage{idx}.patch_embed.{dst}.{param}",
                )
            )
    return embed
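
# For instance, embeddings(0) yields pairs such as
#   ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#    "stage0.patch_embed.proj.weight"),
# read as (HF key, original key); the conversion loop below copies the original
# weight into the HF name.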
def attention(idx, cnt):
    """
    Create rename pairs (HF key, original key) for attention block `cnt` of stage `idx`.
    """
    attention_weights = []
    # convolutional q/k/v projections: a conv followed by a batchnorm each
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        attention_weights.append(
            (
                f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_{proj}.convolution_projection.convolution.weight",
                f"stage{idx}.blocks.{cnt}.attn.conv_proj_{short}.conv.weight",
            )
        )
        for param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (
                    f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_{proj}.convolution_projection.normalization.{param}",
                    f"stage{idx}.blocks.{cnt}.attn.conv_proj_{short}.bn.{param}",
                )
            )
    # linear q/k/v projections
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_{proj}.{param}",
                    f"stage{idx}.blocks.{cnt}.attn.proj_{short}.{param}",
                )
            )
    # attention output projection, MLP and layernorms
    for src, dst in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"cvt.encoder.stages.{idx}.layers.{cnt}.{src}.{param}",
                    f"stage{idx}.blocks.{cnt}.{dst}.{param}",
                )
            )
    return attention_weights
def cls_token(idx):
    """
    Create the rename pair for the cls token; only stage 2 (the last one) has it.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    """
    Create rename pairs for the final layernorm and the classification head.
    """
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """
    Fetch the original CvT weights, rename them to the HF layout and save the checkpoint.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the original weights from the zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint file.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)

# =============================================================================

from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
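        # e.g. with the defaults image_size=30, patch_size=2, mask_ratio=0.6:
        #   num_patches = (30 // 2) ** 2 = 225, and
        #   seq_length = ceil(0.4 * (225 + 1)) = 91 visible tokens (incl. [CLS])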
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    # overwrite from common: TFViTMAEForPreTraining applies random masking, so we
    # fix the noise used to generate the masks during these tests
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)


# =============================================================================
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Base class that every CLI subcommand implements."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


# =============================================================================
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
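
# Illustrative behaviour (each `img` stands for one PIL frame):
#   make_batched(img)              -> [[img]]          a single frame
#   make_batched([img1, img2])     -> [[img1, img2]]   one video
#   make_batched([[img1, img2]])   -> unchanged        already a batch of videos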
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop,
                    crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor,
                    do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
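
# A minimal usage sketch (assumes `frames` is a list of PIL frames):
#   processor = VideoMAEImageProcessor()
#   batch = processor(frames, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, num_frames, 3, 224, 224)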

# =============================================================================

import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Apply the logistic sigmoid 1 / (1 + e^(-x)) elementwise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
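
# A numerically stable variant (a sketch, not part of the original): for large
# negative inputs np.exp(-x) overflows, so we split on the sign of x.
def stable_sigmoid(vector: np.array) -> np.array:
    positive = vector >= 0
    result = np.empty_like(vector, dtype=np.float64)
    # x >= 0: the usual form is safe
    result[positive] = 1 / (1 + np.exp(-vector[positive]))
    # x < 0: rewrite as e^x / (1 + e^x) so the exponent stays non-positive
    exp_x = np.exp(vector[~positive])
    result[~positive] = exp_x / (1 + exp_x)
    return result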
if __name__ == "__main__":
import doctest
doctest.testmod()

# =============================================================================

import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
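
# For example, for layer 0 this yields pairs such as
#   ("blocks.0.attn.proj.weight", "deit.encoder.layer.0.attention.output.dense.weight"),
# read as (timm key, HF key); `rename_key` below pops the old key and re-inserts
# its value under the new one.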
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
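
# The fused timm `qkv` weight has shape (3 * hidden_size, hidden_size); its row
# blocks are, in order, query, key and value. E.g. with hidden_size=768 the
# slices above pick rows 0:768 (q), 768:1536 (k) and 1536:2304 (v).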
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights into our DeiT structure.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)

# =============================================================================

import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) expected from DetrImageProcessor for a single
        image or, with batched=True, the padded shape of a whole batch."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
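
# To exercise these tests locally (the path below is illustrative), note that the two
# @slow integration tests additionally need network access for the DETR checkpoints:
#
#   RUN_SLOW=1 python -m pytest tests/models/detr/test_image_processing_detr.py -q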
| 288 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
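
# The registration pattern exercised above, in short (a sketch): map a config type to
# a processor class once, and the auto-API then resolves it like any built-in pair:
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)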
| 105 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : list ) ->int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
a : Union[str, Any] = grid[0]
for row_n in range(1 , len(_lowercase ) ):
a : Optional[Any] = grid[row_n]
a : Tuple = fill_row(_lowercase , _lowercase )
a : List[Any] = grid[row_n]
return grid[-1][-1]
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : list ) ->list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(_lowercase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
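
# Worked example (a sketch; note that `min_path_sum` mutates its argument in place):
#
#   >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#   7
#
# The optimal path is 1 -> 3 -> 1 -> 1 -> 1, moving only right or down.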
| 105 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}


class RoCBertConfig(PretrainedConfig):
    """Configuration for RoCBert: a BERT variant with extra pronunciation and shape
    embeddings for robust Chinese language modeling."""

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
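
# Minimal usage sketch (assuming a transformers version that ships RoCBert):
#
#   from transformers import RoCBertConfig, RoCBertModel
#
#   config = RoCBertConfig()        # defaults shown in __init__ above
#   model = RoCBertModel(config)    # randomly initialized weights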
| 162 |
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum over all contiguous subarrays of `arr` (optionally
    allowing the empty subarray), using Kadane's algorithm in O(n) time."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        # either extend the running subarray or restart it at the current element
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
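
# Quick check of the two modes (a sketch):
#
#   max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])       # -> 6, from [4, -1, 2, 1]
#   max_subarray_sum([-1, -2], allow_empty_subarrays=True)  # -> 0, the empty subarray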
| 162 | 1 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    """
    BenchmarkArguments are arguments we use in our benchmark scripts **which relate to the benchmarking loop itself**.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self) -> bool:
        # `is_tpu` is provided by the framework-specific subclasses
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
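
# Usage sketch (assuming the deprecated benchmark utilities are still installed; the
# model name is just an example):
#
#   args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[128])
#   print(args.to_json_string())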
| 16 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
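
# Note on `batch_step_no_noise` (exercised in test_batch_step_no_noise above): it lets
# ParaDiGMS-style parallel sampling denoise several timesteps in one batched call by
# flattening a (k, batch, ...) stack of samples and a matching (k, batch) timestep grid
# into a single leading dimension before handing them to the scheduler.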
| 61 | 0 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields_ is the attribute ctypes expects for a C struct layout
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor and restore it afterwards."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
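
# Usage sketch: hide the cursor while rendering long-running terminal output; the
# try/finally in `hide()` restores it even if the wrapped code raises:
#
#   with hide():
#       run_long_task()   # hypothetical callable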
| 184 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_blip_2'] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
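
# With the _LazyModule indirection above, importing lightweight symbols stays cheap
# (a sketch; the module path assumes the usual transformers layout):
#
#   from transformers import Blip2Config   # config only, no torch-heavy imports
#   from transformers import Blip2Model    # first access triggers the modeling import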
| 184 | 1 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether digit `n` may be placed at (row, column): it must not already
    appear in the same row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the sudoku by backtracking: try each digit in the first empty cell
    and recurse; undo the assignment when a branch fails."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
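
# Note: `sudoku` mutates the grid it is given and tries at most 9 digits per empty
# cell, so the worst case is O(9^m) for m empty cells. Pass a copy to keep the
# original grid intact (a sketch):
#
#   solved = sudoku([row[:] for row in initial_grid])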
| 144 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
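
# Example invocation (a sketch; the script filename is illustrative):
#
#   python convert_convnext_upernet_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path /tmp/upernet_dump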
| 144 | 1 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a tokenizer into a single processor."""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping, return_length=return_length,
                verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
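
# Usage sketch ("facebook/flava-full" is the public FLAVA checkpoint; the `image`
# variable is illustrative):
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")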
| 353 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results, clearing any stale config/weights first
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, '''config.json''')) and os.path.isfile(
            os.path.join(dirpath, '''config.json''')
        ):
            os.remove(os.path.join(dirpath, '''config.json'''))
        if os.path.exists(os.path.join(dirpath, '''pytorch_model.bin''')) and os.path.isfile(
            os.path.join(dirpath, '''pytorch_model.bin''')
        ):
            os.remove(os.path.join(dirpath, '''pytorch_model.bin'''))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a categorical distribution `p` (optionally squaring
    unnormalized attention scores first)."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged line per layer."""
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data))
def compute_heads_importance(
    args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ) -> Union[str, Any]:
    """simple docstring"""
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If the attention heads have actually been pruned, set the head mask to None to avoid a shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids ,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first outputs, attentions the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_ad_tensor(head_importance )
    logger.info('''Head ranked by importance scores''' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
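# Note: head importance above follows the Michel et al. (2019, "Are Sixteen Heads
# Really Better than One?") proxy: the absolute gradient of the loss w.r.t. each
# head-mask entry, accumulated over the eval set, normalized by token count, and
# optionally L2-normalized per layer so no single layer dominates the ranking.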
def mask_heads(args , model , eval_dataloader ) -> Optional[int]:
    """simple docstring"""
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of a downstream score, use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask
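# Note on the stopping rule above: masking continues while 1/loss stays at or above
# masking_threshold * the unmasked 1/loss; the mask saved at the top of the final
# iteration (the last one that still met the threshold) is persisted and returned.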
def prune_heads(args , model , eval_dataloader , head_mask ) -> str:
    """simple docstring"""
    # Pruning is like masking, except the masked head weights are actually removed.
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , score_masking , score_pruning )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main() -> Optional[int]:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=str , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=None , type=str , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=int , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask heads until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=float , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=float , help='''Fraction of heads to mask at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=str , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=int , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=int , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=int , default=42 )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('''cuda''' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='''nccl''' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
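# --- Hedged example invocation (script and data file names are illustrative):
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir data/token_ids.txt \
#       --output_dir out/ --batch_size 4 --try_masking --masking_threshold 0.9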
| 279 | 0 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict ) -> Dict:
    """simple docstring"""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys(s_dict ) -> Optional[int]:
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers' , 'layers' )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace('subsample' , 'conv' )] = s_dict.pop(key )
def make_linear_from_emb(emb ) -> Dict:
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data  # tie the projection to the embedding matrix
    return lin_layer
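# --- Hedged note: nn.Linear stores its weight as (out_features, in_features), so the
# declaration above is transposed relative to the embedding's (vocab_size, emb_size)
# shape; it still works because `.weight.data` is replaced wholesale. A shape-consistent
# variant would be:
# lin_layer = nn.Linear(emb_size, vocab_size, bias=False)  # weight: (vocab_size, emb_size)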
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path , pytorch_dump_folder_path ) -> List[Any]:
    """simple docstring"""
    mam_aaa = torch.load(checkpoint_path , map_location='cpu' )
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(',' )]
    config = SpeechaTextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=2_00 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = SpeechaTextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
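# --- Hedged example invocation (script and checkpoint file names are illustrative):
#   python convert_speech_to_text.py --fairseq_path checkpoints/s2t_transformer.pt \
#       --pytorch_dump_folder_path s2t-hf/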
| 32 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class LRUCache(Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self , n: int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0." )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self , x: T ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()  # evict the least recently used key
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self ) -> None:
        for k in self.dq_store:
            print(k )

    def __repr__( self ) -> str:
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
lru_cache = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
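# --- Hedged alternative (not part of the original): `refer` above is O(n) because
# deque.remove scans the queue; an OrderedDict keeps the same semantics in O(1).
from collections import OrderedDict


class LRUCacheOD:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)     # mark as most recently used
            return
        if len(self.store) == self.capacity:
            self.store.popitem(last=False)  # evict the least recently used
        self.store[key] = None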
| 87 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 1_024,
'gpt2-medium': 1_024,
'gpt2-large': 1_024,
'gpt2-xl': 1_024,
'distilgpt2': 1_024,
}
class GPTaTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("add_bos_token" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation: "Conversation" ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 354 |
'''simple docstring'''
def __UpperCAmelCase ( a: int, b: int ):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ), len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ), b_binary.zfill(max_len ) ) )
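# --- Hedged usage examples (results checked by hand; upstream this helper is
# typically named `binary_and`):
# __UpperCAmelCase(25, 32) -> "0b000000"   (011001 & 100000)
# __UpperCAmelCase(37, 50) -> "0b100000"   (100101 & 110010)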
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 17 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name: str ) -> int:
    """simple docstring"""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F'.{module_name}' , """transformers.models""" )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , """__name__""" , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("""transformers""" )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ) -> Union[str, Any]:
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            """Could not locate the image processor configuration file, will try to use the model config instead.""" )
        return {}
    with open(resolved_config_file , encoding="""utf-8""" ) as reader:
        return json.load(reader )
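# --- Hedged usage sketch (repo id is illustrative):
# config = get_image_processor_config("google/vit-base-patch16-224")
# print(config.get("image_processor_type"))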
class AutoImageProcessor :
    def __init__( self ):
        '''simple docstring'''
        raise EnvironmentError(
            """AutoImageProcessor is designed to be instantiated """
            """using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        config = kwargs.pop("""config""" , None )
        trust_remote_code = kwargs.pop("""trust_remote_code""" , None )
        kwargs["""_from_auto"""] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get("""image_processor_type""" , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("""feature_extractor_type""" , None )
            if feature_extractor_class is not None:
                logger.warning(
                    """Could not find image processor class in the image processor config or the model config. Loading"""
                    """ based on pattern matching with the model's feature extractor configuration.""" )
                image_processor_class = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
            if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
                logger.warning(
                    """Could not find image processor auto map in the image processor config or the model config."""
                    """ Loading based on pattern matching with the model's feature extractor configuration.""" )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config , """image_processor_type""" , None )
            if hasattr(config , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop("""code_revision""" , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
            F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
            F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
    @staticmethod
    def register( config_class , image_processor_class ):
        '''simple docstring'''
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
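# --- Hedged usage sketch (checkpoint and custom class names are illustrative):
# image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
# AutoImageProcessor.register(MyConfig, MyImageProcessor)  # hypothetical custom pair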
| 306 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test( self ) -> List[str]:
        """simple docstring"""
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.22_8168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 76 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaImgaImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xla_options = False
    @property
    def text_embedder_hidden_size( self ) -> Dict:
        """simple docstring"""
        return 32

    @property
    def time_input_dim( self ) -> Optional[int]:
        """simple docstring"""
        return 32

    @property
    def block_out_channels_a( self ) -> List[Any]:
        """simple docstring"""
        return self.time_input_dim

    @property
    def time_embed_dim( self ) -> List[str]:
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ) -> Any:
        """simple docstring"""
        return 1_00
@property
    def dummy_unet( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ) -> Tuple:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ) -> Dict:
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ) -> Any:
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> List[str]:
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((2_56, 2_56) )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_img2img( self ) -> Union[str, Any]:
        """simple docstring"""
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    def tearDown( self ) -> List[str]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ) -> Dict:
        """simple docstring"""
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        prompt = """A red cartoon frog, 4k"""
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_embeds , negative_image_embeds = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_embeds , negative_image_embeds=negative_image_embeds , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
| 360 |
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple( x ) -> str:
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model( self , vision_config , text_config ) -> int:
        """simple docstring"""
        pass

    def prepare_config_and_inputs( self ) -> Optional[Any]:
        """simple docstring"""
        pass

    def get_pretrained_model_and_inputs( self ) -> List[Any]:
        """simple docstring"""
        pass
    def check_model_from_pretrained_configs(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ) -> str:
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_model(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ) -> Dict:
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ) -> Any:
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ) -> Any:
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1E-5 )
    def check_vision_text_output_attention(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ) -> Dict:
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals( self , a: np.ndarray , b: np.ndarray , tol: float ) -> Optional[Any]:
        """simple docstring"""
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
    def test_vision_text_dual_encoder_model( self ) -> Dict:
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict )

    def test_model_from_pretrained_configs( self ) -> int:
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )

    def test_vision_text_dual_encoder_from_pretrained( self ) -> Tuple:
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )

    def test_save_load( self ) -> str:
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )

    def test_vision_text_output_attention( self ) -> Optional[Any]:
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )
@slow
    def test_real_model_save_load_from_pretrained( self ) -> List[str]:
        """simple docstring"""
        model_2 , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs )
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname )
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_1(**inputs )
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_tf
class TFViTBertModelTest( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ) -> Dict:
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ) -> str:
        """simple docstring"""
        vision_model = TFViTModel(vision_config , name="""vision_model""" )
        text_model = TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs( self ) -> Dict:
        """simple docstring"""
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
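# Note: each test class below plugs a different (vision, text) backbone pair into the
# shared mixin above; only get_pretrained_model_and_inputs, get_vision_text_model and
# prepare_config_and_inputs differ (plus an attention-shape override for DeiT's extra
# distillation token).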
@require_tf
class TFDeiTRobertaModelTest( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ) -> str:
        """simple docstring"""
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ) -> int:
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model( self , vision_config , text_config ) -> List[Any]:
        """simple docstring"""
        vision_model = TFDeiTModel(vision_config , name="""vision_model""" )
        text_model = TFRobertaModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs( self ) -> str:
        """simple docstring"""
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(a_, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
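# A hedged, self-contained sketch (not part of the test suite above): it reuses the same
# public checkpoint as the integration test, everything else is standard transformers API.
# It shows the intended inference flow of a composed dual encoder: raw text + image in,
# an (images x texts) similarity matrix out.
def _example_dual_encoder_scoring():
    model = TFVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", from_pt=True)
    processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(text=["una foto di un gatto"], images=image, padding=True, return_tensors="np")
    return model(**inputs).logits_per_image  # shape: (num_images, num_texts)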
| 212 | 0 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """Harris corner detector; k is the empirical constant, usually 0.04-0.06."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)
    def detect(self, img_path: str) -> tuple[np.ndarray, list[list[int]]]:
        """Returns the image with corners marked in red and a list of [x, y, response]."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the configured constant instead of a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                # Harris response: R = det(M) - k * trace(M)^2
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
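# Hedged demo (an addition, not part of the original script): exercise the detector
# end to end on a synthetic checkerboard so no external image file is needed.
def _demo_on_checkerboard(path: str = "checkerboard.png") -> int:
    tile = np.zeros((8, 8), dtype=np.uint8)
    tile[:4, :4] = 255
    tile[4:, 4:] = 255
    cv2.imwrite(path, np.tile(tile, (8, 8)))  # 64x64 board
    _, corners = HarrisCorner(0.04, 3).detect(path)
    return len(corners)  # responses cluster where four squares meet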
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = HarrisCorner(0.04, 3)
__lowerCamelCase , __lowerCamelCase : List[Any] = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 18 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
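# Hedged sketch (an illustration, not part of the test suite): the point of an in-graph
# tokenizer is that a SavedModel can accept raw strings directly, with tokenization
# compiled into the TensorFlow graph.
def _example_string_in_logits_out():
    tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
    model = ModelToSave(tokenizer=tokenizer)
    return model.serving(tf.constant(["Hello world"]))  # logits computed straight from raw text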
| 182 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _snake_case ( lowerCamelCase__ : list[list[float]] ) -> list[list[float]]:
lowerCamelCase_ : List[str] =Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowerCamelCase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCamelCase_ : Any =float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
lowerCamelCase_ : str =[[0.0, 0.0], [0.0, 0.0]]
lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =matrix[1][1], matrix[0][0]
lowerCamelCase_ , lowerCamelCase_ : int =-matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowerCamelCase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 209 |
"""simple docstring"""
def _snake_case ( lowerCamelCase__ : Optional[Any] ) -> Optional[int]:
if not head:
return True
# split the list to two parts
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =head.next, head
while fast and fast.next:
lowerCamelCase_ : Optional[Any] =fast.next.next
lowerCamelCase_ : str =slow.next
lowerCamelCase_ : Tuple =slow.next
lowerCamelCase_ : Any =None # Don't forget here! But forget still works!
# reverse the second part
lowerCamelCase_ : List[str] =None
while second:
lowerCamelCase_ : Any =second.next
lowerCamelCase_ : Union[str, Any] =node
lowerCamelCase_ : Union[str, Any] =second
lowerCamelCase_ : Optional[Any] =nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
lowerCamelCase_ : List[str] =node.next
lowerCamelCase_ : Optional[Any] =head.next
return True
def is_palindrome_stack(head):
    """Stack version: push the second half, then pop while walking from the head."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def is_palindrome_dict(head):
    """Dict version: record each value's positions and check they mirror around the middle."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
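if __name__ == "__main__":
    # Hedged round-trip check (an addition for illustration; the original module assumes
    # list nodes with `.val` and `.next` defined elsewhere).
    class Node:
        def __init__(self, val, nxt=None):
            self.val, self.next = val, nxt

    def build(values):
        head = None
        for v in reversed(values):
            head = Node(v, head)
        return head

    assert is_palindrome(build([1, 2, 2, 1]))
    assert is_palindrome_stack(build([1, 2, 3, 2, 1]))
    assert not is_palindrome_dict(build([1, 2, 3]))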
| 209 | 1 |
"""simple docstring"""
def lowercase ( A_ , A_ )-> float:
'''simple docstring'''
if mass < 0:
raise ValueError("The mass of a body cannot be negative" )
return 0.5 * mass * abs(A_ ) * abs(A_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 40 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n    title={CRC standard probability and statistics tables and formulae},\n    author={Kokoska, Stephen and Zwillinger, Daniel},\n    year={2000},\n    publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n    author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n        Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n        Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n        Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n        Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n        Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n        Kern, Robert and Larson, Eric and Carey, C J and\n        Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n        {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n        Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n        Harris, Charles R. and Archibald, Anne M. and\n        Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n        {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n    title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n        Computing in Python}},\n    journal = {Nature Methods},\n    year = {2020},\n    volume = {17},\n    pages = {261--272},\n    adsurl = {https://rdcu.be/b08Wh},\n    doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
return {"spearmanr": results[0]} | 341 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : int , snake_case : int , snake_case : Tuple=False )-> List[Any]:
if isinstance(snake_case , snake_case ) and isinstance(snake_case , snake_case ):
_lowerCamelCase = len(set_a.intersection(snake_case ) )
if alternative_union:
_lowerCamelCase = len(snake_case ) + len(snake_case )
else:
_lowerCamelCase = len(set_a.union(snake_case ) )
return intersection / union
if isinstance(snake_case , (list, tuple) ) and isinstance(snake_case , (list, tuple) ):
_lowerCamelCase = [element for element in set_a if element in set_b]
if alternative_union:
_lowerCamelCase = len(snake_case ) + len(snake_case )
return len(snake_case ) / union
else:
_lowerCamelCase = set_a + [element for element in set_b if element not in set_a]
return len(snake_case ) / len(snake_case )
return len(snake_case ) / len(snake_case )
return None
if __name__ == "__main__":
A_ : Dict ={"""a""", """b""", """c""", """d""", """e"""}
A_ : List[str] ={"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
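    # Hedged extra demo (not in the original script): the alternative union counts
    # duplicates, |A| + |B| = 11, so the same sets score 3 / 11 ≈ 0.27.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))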
| 365 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a DETA model.
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
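# Hedged usage sketch (an addition for illustration, not part of the original module):
# the defaults give the two-stage DETA setup with a ResNet backbone, and to_dict()
# round-trips the nested backbone config.
#
#   config = DetaConfig(num_queries=300)
#   assert config.to_dict()["backbone_config"]["model_type"] == "resnet"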
| 80 | 0 |
"""simple docstring"""
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :int ):
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError('''The length of profit and weight must be same.''' )
if max_weight <= 0:
raise ValueError('''max_weight must greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
__UpperCAmelCase = [p / w for p, w in zip(snake_case_ , snake_case_ )]
# Creating a copy of the list and sorting profit/weight in ascending order
__UpperCAmelCase = sorted(snake_case_ )
# declaring useful variables
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
__UpperCAmelCase = sorted_profit_by_weight[length - i - 1]
__UpperCAmelCase = profit_by_weight.index(snake_case_ )
__UpperCAmelCase = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
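if __name__ == "__main__":
    # Hedged check with a classic textbook instance (not from the original script):
    # ratios are 6, 5, 4, so items 1 and 2 go in whole plus 2/3 of item 3 -> 240.0.
    assert calc_profit([60, 100, 120], [10, 20, 30], 50) == 240.0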
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
_lowercase : str = [int(x) for x in input('Input profits separated by spaces: ').split()]
_lowercase : str = [int(x) for x in input('Input weights separated by spaces: ').split()]
_lowercase : Any = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 332 |
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed by stacked layers.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
SCREAMING_SNAKE_CASE__ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
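# Hedged smoke test (an illustration, not part of the original module): with random
# weights and a default config, a 224x224 batch is downsampled by a factor of 32, so the
# last hidden state has spatial size 7x7 and the pooled output is (batch, C, 1, 1).
#
#   model = RegNetModel(RegNetConfig())
#   out = model(torch.randn(1, 3, 224, 224))
#   print(out.last_hidden_state.shape, out.pooler_output.shape)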
| 325 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
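# Hedged usage sketch (illustrative, not part of the test suite): unconditional audio
# generation mirrors the integration test above.
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]  # (2, samples)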
| 355 |
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """
    Returns the smallest row length n for which the number of ways to fill the row with
    blocks of length at least `min_block_length` exceeds one million. The recurrence adds,
    for each new length n, every placement of a final block at every start position, plus
    the all-empty filling.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
| 252 | 0 |
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class _snake_case ( _snake_case ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
super().__init__()
self.register_modules(vqvae=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase )
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
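# Hedged usage sketch (illustrative; the checkpoint is the public LDM x4 upscaler,
# everything else is standard diffusers API):
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]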
| 94 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 5_12,
'''xlm-roberta-large''': 5_12,
'''xlm-roberta-large-finetuned-conll02-dutch''': 5_12,
'''xlm-roberta-large-finetuned-conll02-spanish''': 5_12,
'''xlm-roberta-large-finetuned-conll03-english''': 5_12,
'''xlm-roberta-large-finetuned-conll03-german''': 5_12,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """An XLM-RoBERTa sequence has the format `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a :int = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
a :List[Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
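    # A minimal usage sketch (hedged: the tokenizer class name and the SentencePiece model
    # path are placeholders — a real trained .bpe.model file is required):
    #
    #   tok = TheTokenizerClassAbove("sentencepiece.bpe.model")
    #   pieces = tok._tokenize("Hello world")               # e.g. ['▁Hello', '▁world']
    #   ids = [tok._convert_token_to_id(p) for p in pieces]
    #   ids = tok.build_inputs_with_special_tokens(ids)     # [<s>] + ids + [</s>]
    #
    # Note how the fairseq alignment table above shifts every spm id by 1, so spm id 3
    # (the token ",") becomes id 4, matching the original fairseq vocabulary.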
| 94 | 1 |
def triangle_number_generator():
    """Yield successive triangle numbers n * (n + 1) / 2."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
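# Worked check of the divisor-count trick (illustrative value, not part of the search):
# 28 = 2^2 * 7, so count_divisors(28) = (2 + 1) * (1 + 1) = 6 — the divisors 1, 2, 4, 7, 14, 28.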
if __name__ == "__main__":
print(solution())
| 366 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
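    # Shape of the warmup schedule above (illustrative values, assuming inv_gamma=1.0 and
    # power=2/3): step 1 -> ~0.37, step 100 -> ~0.95, step 1000 -> ~0.99, rising toward
    # (and clamped at) `decay`.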
@torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
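# A minimal training-loop sketch (hedged: `unet` stands for any torch.nn.Module and the
# loop body is elided; this mirrors how an EMA helper of this shape is typically driven):
#
#   ema = EMAModel(unet.parameters(), decay=0.9999, use_ema_warmup=True)
#   for batch in dataloader:
#       ...                               # forward / backward / optimizer.step()
#       ema.step(unet.parameters())       # update the shadow weights
#   ema.store(unet.parameters())          # stash the raw weights
#   ema.copy_to(unet.parameters())        # evaluate with the averaged weights
#   ema.restore(unet.parameters())        # put the raw weights back to keep training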
| 181 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
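    # Worked example of the resize rule above (assumed input): a 400x300 (w x h) image with
    # shortest_edge=18 gives expected_height=18 and expected_width=int(18 * 400 / 300)=24.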
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

| 297 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
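# Quick numeric check (assumed component values): L = 10 mH and C = 5 µF give
# resonant_frequency(0.01, 5e-6)[1] ≈ 711.76 Hz, since f = 1 / (2π·sqrt(L·C)).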
if __name__ == "__main__":
import doctest
    doctest.testmod()

| 297 | 1 |
def is_palindrome(num: int) -> bool:
    """Return True if num reads the same forwards and backwards."""
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    """Return num plus its digit-reversed counterpart."""
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 10_000) -> int:
    """Count the Lychrel candidates below `limit` (no palindrome within 50 reverse-and-add steps)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a_num = num
        while iterations < 50:
            a_num = sum_reverse(a_num)
            iterations += 1
            if is_palindrome(a_num):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
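# Worked example of one iteration: 47 + 74 = 121, a palindrome after a single step, so 47
# is not Lychrel; 196 (the classic candidate) yields no palindrome within 50 steps and is counted.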
if __name__ == "__main__":
print(F"""{solution() = }""")
| 370 |
from math import pi, sqrt
def gamma(num: float) -> float:
    """Return Gamma(num) for positive integers and half-integers via Γ(n) = (n - 1)·Γ(n - 1)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
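# Worked half-integer example: gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) = 1.875 * sqrt(pi) ≈ 3.3234.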
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 221 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
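# The registration pattern exercised above, in brief (hypothetical custom classes):
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   # AutoImageProcessor.from_pretrained(...) can now resolve "custom"-typed checkpoints.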
| 9 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
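# Example invocation (hedged: the script filename is a placeholder and the Drive link must
# match the chosen model; a default URL is defined in the argparse section below):
#   python convert_videomae_to_pytorch.py --model_name videomae-base \
#       --checkpoint_url "<google-drive-download-link>" --pytorch_dump_folder_path ./videomae-base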
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)

| 206 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Compute the Euclidean distance with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Compute the Euclidean distance in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
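# Worked example: euclidean_distance([1, 2, 3], [4, 5, 6]) = sqrt(9 + 9 + 9) = sqrt(27) ≈ 5.196.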
if __name__ == "__main__":
    def benchmark() -> None:
'''simple docstring'''
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" ,number=1_00_00 ,globals=globals() ,) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" ,number=1_00_00 ,globals=globals() ,) )
benchmark()
| 277 |
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum: two running sums modulo 65521, packed into one 32-bit value."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
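if __name__ == "__main__":
    # Known test vector: the Adler-32 checksum of "Wikipedia" is 300286872 (0x11E60398).
    assert adler32("Wikipedia") == 300286872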
| 277 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint file into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
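# Kernel-to-weight conventions handled above, in brief:
#   - a 4D conv "kernel" stored as (H, W, in, out) is transposed to PyTorch's (out, in, H, W);
#   - a 2D dense "kernel" is transposed, since Flax stores (in, out) and torch.nn.Linear stores (out, in);
#   - a layer-norm "scale" simply maps to "weight".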
| 313 |

"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))

| 135 | 0 |
"""Project Euler problem 25: index of the first Fibonacci term to contain n digits."""


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term with n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        fib = fa + fb
        fa, fb = fb, fib
        index += 1
        if len(str(fib)) == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
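# Added sanity check (illustrative, not part of the original solution): the
# first Fibonacci term with three digits is F(12) = 144, so solution(3) == 12.
assert solution(3) == 12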
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy E = 1/2 * m * v**2 of a body."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
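# Illustrative check (added): a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * 10**2 = 500 J, regardless of the direction of travel.
assert kinetic_energy(10, 10) == 500
assert kinetic_energy(10, -10) == 500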
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowercase ( lowerCAmelCase__ : Dict[str, torch.Tensor] ) -> List[Any]:
__a = []
__a = []
__a = []
for rt in rc.restypes:
__a = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
__a = {name: i for i, name in enumerate(snake_case__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
__a = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
__a = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
__a = torch.tensor(
snake_case__ , dtype=torch.floataa , device=protein['''aatype'''].device , )
__a = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
__a = restype_atomaa_to_atomaa[protein_aatype]
__a = restype_atomaa_mask[protein_aatype]
__a = residx_atomaa_mask
__a = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
__a = restype_atomaa_to_atomaa[protein_aatype]
__a = residx_atomaa_to_atomaa.long()
# create the corresponding mask
__a = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
__a = rc.restype_atoa[restype_letter]
__a = rc.residue_atoms[restype_name]
for atom_name in atom_names:
__a = rc.atom_order[atom_name]
__a = 1
__a = restype_atomaa_mask[protein_aatype]
__a = residx_atomaa_mask
return protein
def lowercase ( lowerCAmelCase__ : Dict[str, torch.Tensor] ) -> Any:
__a = tree_map(lambda lowerCAmelCase__ : torch.tensor(snake_case__ , device=batch['''aatype'''].device ) , snake_case__ , np.ndarray )
__a = tensor_tree_map(lambda lowerCAmelCase__ : np.array(snake_case__ ) , make_atomaa_masks(snake_case__ ) )
return out
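# Illustrative usage sketch (added, not part of the original module): the only
# required input is an `aatype` tensor of residue-type indices in [0, 20];
# every mask and index map is derived from it, e.g.
#     protein = make_atom14_masks({"aatype": torch.zeros(8, dtype=torch.long)})
#     protein["residx_atom14_to_atom37"].shape  # -> torch.Size([8, 14])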
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
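# Illustrative note (added): `map_nested` applies the function to every leaf of
# an arbitrarily nested structure, e.g. map_nested(add_one, {"a": [1, 2]})
# yields {"a": [2, 3]}, which is exactly what the expectations above encode.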
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 179 | 1 |
import unittest

from datasets import load_dataset

from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
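# Illustrative note (added): with the `_LazyModule` indirection above, a
# submodule is only imported on first attribute access, e.g.
#     from transformers.models.dpt import DPTConfig
# triggers the import of `configuration_dpt` while `modeling_dpt` stays unloaded.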
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    If a `default_config.yaml` file is located in the cache it will temporarily move it
    for the duration of the tests.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate tpu-config` CLI passes the right command to `gcloud`.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = theta_0 + sum_i theta_i * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Return the actual output of the requested example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value of the requested example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the cost-derivative terms; index -1 selects the bias parameter."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
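# Note (added): each iteration applies the batch gradient-descent update
#     parameter_vector[i] <- parameter_vector[i] - LEARNING_RATE * dJ/d(theta_i)
# where `get_cost_derivative(-1)` covers the bias term, whose derivative has no
# input-feature factor (see the `index == -1` branch above).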
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
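# Illustrative check (added): with L = 10 mH and C = 100 nF,
# f = 1 / (2*pi*sqrt(1e-2 * 1e-7)) ≈ 5032.9 Hz.
assert round(resonant_frequency(0.01, 1e-7)[1], 1) == 5032.9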
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower and upper triangular factors."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total

    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
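# Illustrative check (added): the factors should reconstruct the input,
# i.e. L @ U == A for any matrix admitting an LU decomposition.
_matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
_lower, _upper = lower_upper_decomposition(_matrix)
assert np.allclose(_lower @ _upper, _matrix)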
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
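# Illustrative check (added): the checkpoint regex above extracts (name, link)
# pairs from a docstring reference.
assert _re_checkpoint.findall(
    "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]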
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : Optional[Any] = """mask2former"""
a__ : Union[str, Any] = ["""swin"""]
a__ : Dict = {"""hidden_size""": """hidden_dim"""}
def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 1_024 , SCREAMING_SNAKE_CASE = "relu" , SCREAMING_SNAKE_CASE = 6 , SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = 8 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 2_048 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 4 , SCREAMING_SNAKE_CASE = 255 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 2.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 12_544 , SCREAMING_SNAKE_CASE = 3.0 , SCREAMING_SNAKE_CASE = 0.75 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
snake_case : List[str] = CONFIG_MAPPING["swin"](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case : Tuple = backbone_config.pop("model_type" )
snake_case : Dict = CONFIG_MAPPING[backbone_model_type]
snake_case : Optional[int] = config_class.from_dict(SCREAMING_SNAKE_CASE )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
snake_case : List[str] = backbone_config
snake_case : Optional[int] = feature_size
snake_case : Optional[int] = mask_feature_size
snake_case : Optional[int] = hidden_dim
snake_case : List[str] = encoder_feedforward_dim
snake_case : Dict = activation_function
snake_case : Optional[Any] = encoder_layers
snake_case : Any = decoder_layers
snake_case : Optional[int] = num_attention_heads
snake_case : List[str] = dropout
snake_case : List[Any] = dim_feedforward
snake_case : Tuple = pre_norm
snake_case : int = enforce_input_projection
snake_case : str = common_stride
snake_case : List[Any] = ignore_value
snake_case : Optional[int] = num_queries
snake_case : Optional[int] = no_object_weight
snake_case : Dict = class_weight
snake_case : Tuple = mask_weight
snake_case : Tuple = dice_weight
snake_case : Tuple = train_num_points
snake_case : int = oversample_ratio
snake_case : Dict = importance_sample_ratio
snake_case : Tuple = init_std
snake_case : Dict = init_xavier_std
snake_case : List[Any] = use_auxiliary_loss
snake_case : Dict = feature_strides
snake_case : List[Any] = output_auxiliary_logits
snake_case : Union[str, Any] = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE )
@classmethod
def lowerCamelCase_ ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = copy.deepcopy(self.__dict__ )
snake_case : str = self.backbone_config.to_dict()
snake_case : Optional[int] = self.__class__.model_type
return output
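# Illustrative usage (added sketch): constructing the config with no arguments
# falls back to the default Swin backbone config, e.g.
#     config = Mask2FormerConfig()
#     config.backbone_config.model_type  # -> "swin"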
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__snake_case = '''src/transformers'''
# Matches is_xxx_available()
__snake_case = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__snake_case = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__snake_case = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__snake_case = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__snake_case = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__snake_case = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__snake_case = re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__snake_case = re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__snake_case = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__snake_case = re.compile(r'''^\s*try:''')
# Catches a line with else:
__snake_case = re.compile(r'''^\s*else:''')
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if _re_test_backend.search(_lowerCAmelCase ) is None:
return None
_a = [b[0] for b in _re_backend.findall(_lowerCAmelCase )]
backends.sort()
return "_and_".join(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
with open(_lowerCAmelCase, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
_a = f.readlines()
_a = 0
while line_index < len(_lowerCAmelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_lowerCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
_a = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
_a = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_lowerCAmelCase ):
_a = _re_one_line_import_struct.search(_lowerCAmelCase ).groups()[0]
_a = re.findall('''\[([^\]]+)\]''', _lowerCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
_a = _re_import_struct_key_value.search(_lowerCAmelCase )
if single_line_import_search is not None:
_a = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_lowerCAmelCase ) > 0]
objects.extend(_lowerCAmelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
_a = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_a = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_a = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_a = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
_a = lines[line_index]
if _re_import_struct_add_one.search(_lowerCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_lowerCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_lowerCAmelCase ) is not None:
_a = _re_import_struct_add_many.search(_lowerCAmelCase ).groups()[0].split(''', ''' )
_a = [obj[1:-1] for obj in imports if len(_lowerCAmelCase ) > 0]
objects.extend(_lowerCAmelCase )
elif _re_between_brackets.search(_lowerCAmelCase ) is not None:
_a = _re_between_brackets.search(_lowerCAmelCase ).groups()[0].split(''', ''' )
_a = [obj[1:-1] for obj in imports if len(_lowerCAmelCase ) > 0]
objects.extend(_lowerCAmelCase )
elif _re_quote_object.search(_lowerCAmelCase ) is not None:
objects.append(_re_quote_object.search(_lowerCAmelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
_a = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_a = []
while (
line_index < len(_lowerCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
_a = lines[line_index]
_a = _re_import.search(_lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_a = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_lowerCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
_a = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_a = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_a = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
_a = lines[line_index]
_a = _re_import.search(_lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_a = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def A_ ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
def find_duplicates(_lowerCAmelCase : Dict ):
return [k for k, v in collections.Counter(_lowerCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_a = []
for key in import_dict_objects.keys():
_a = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}' )
_a = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_a = '''base imports''' if key == '''none''' else f'{key} backend'
errors.append(f'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def A_ ( ):
"""simple docstring"""
_a = []
for root, _, files in os.walk(_lowerCAmelCase ):
if "__init__.py" in files:
_a = os.path.join(_lowerCAmelCase, '''__init__.py''' )
_a = parse_init(_lowerCAmelCase )
if objects is not None:
_a = analyze_results(*_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_a = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('''\n'''.join(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) > 0:
raise ValueError('''\n\n'''.join(_lowerCAmelCase ) )
def A_ ( ):
"""simple docstring"""
_a = []
for path, directories, files in os.walk(_lowerCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_lowerCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_lowerCAmelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_a = str((Path(_lowerCAmelCase ) / folder).relative_to(_lowerCAmelCase ) )
_a = short_path.replace(os.path.sep, '''.''' )
submodules.append(_lowerCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
_a = str((Path(_lowerCAmelCase ) / fname).relative_to(_lowerCAmelCase ) )
_a = short_path.replace('''.py''', '''''' ).replace(os.path.sep, '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_lowerCAmelCase )
return submodules
__snake_case = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def A_ ( ):
"""simple docstring"""
_a = importlib.util.spec_from_file_location(
'''transformers''', os.path.join(_lowerCAmelCase, '''__init__.py''' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
_a = spec.loader.load_module()
_a = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_lowerCAmelCase ) > 0:
_a = '''\n'''.join(f'- {module}' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
f'{list_of_modules}\n'
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules() | 153 |
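# Illustrative behaviour (added): `find_backend` extracts and normalizes the
# backend name(s) from a guard line, e.g.
#     find_backend("    if not is_torch_available():")  # -> "torch"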
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = RobertaTokenizer
A_ : Any = RobertaTokenizerFast
A_ : Dict = True
A_ : Tuple = {'cls_token': '<s>'}
def _UpperCAmelCase ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
_a = '''lower newer'''
_a = '''lower newer'''
return input_text, output_text
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''lower newer'''
_a = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a = tokenizer.tokenize(__UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained('''roberta-base''' )
_a = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
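# Illustrative sketch (not part of the original test suite): the interaction checked
# above can be reproduced directly with the fast tokenizer. Assumes network access to
# the `roberta-base` checkpoint.
#
#     from transformers import RobertaTokenizerFast
#
#     tok = RobertaTokenizerFast.from_pretrained(
#         "roberta-base", add_prefix_space=True, trim_offsets=False
#     )
#     enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
#     # With trim_offsets=False, the second token's span keeps its leading space:
#     # [(0, 5), (5, 11)]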
from math import pi, sqrt, tan

def surface_area_cube(side_length: float) -> float:
    """Calculate the surface area of a cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Calculate the surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Calculate the surface area of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Calculate the surface area of a hemisphere (curved surface plus flat disc)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Calculate the surface area of a cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Calculate the surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Calculate the surface area of a cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the surface area of a torus."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Calculate the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Calculate the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Calculate the area of a triangle given its base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Calculate the area of a triangle from its three sides using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Calculate the area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Calculate the area of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Calculate the area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Calculate the area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Calculate the area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Calculate the area of a regular polygon with `sides` sides of length `length`."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
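# Illustrative sketch (not part of the original tests): the multilingual checkpoint
# prepends a language-code token when `tgt_lang` is set, mirroring the assertions
# above. Assumes network access to the checkpoint and that "fr" is a supported code.
if __name__ == "__main__":
    tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
    tokenizer.tgt_lang = "fr"
    ids = tokenizer("C'est trop cool").input_ids
    print(ids[0] == tokenizer.lang_code_to_id["fr"])  # True: first id is the French code
    print(ids[-1] == tokenizer.eos_token_id)  # True: sequence ends with </s>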
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
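# Illustrative sketch (not part of the original tests): consuming generated text
# incrementally with `TextIteratorStreamer`, as exercised above. The tiny checkpoint
# is only a stand-in; any causal LM works the same way.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    for chunk in streamer:  # yields decoded text pieces as they are generated
        print(chunk, end="", flush=True)
    thread.join()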
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
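# Note on the score above (illustrative, not part of the original test): the HF loss
# is the mean negative log-likelihood over label tokens, while the reference score is
# a total log-likelihood, hence the conversion:
#
#     loss = model(input_ids, labels=labels).loss    # mean NLL per label token
#     mtf_score = -(labels.shape[-1] * loss.item())  # total log-likelihood of labels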
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
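# Illustrative usage (not part of the original module): configs are plain containers,
# so a variant can be built and inspected without loading weights. The OnnxConfig
# constructor call below assumes the standard `OnnxConfig(config)` signature.
if __name__ == "__main__":
    config = Data2VecVisionConfig(image_size=384, use_auxiliary_head=False)
    print(config.model_type)   # "data2vec-vision"
    print(config.image_size)   # 384
    print(Data2VecVisionOnnxConfig(config).atol_for_validation)  # 1e-4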
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    model: str
    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
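# Illustrative client sketch (not part of the original command): once the server is
# running (e.g. `transformers-cli serve --task sentiment-analysis --port 8888`), the
# endpoints registered above can be exercised with any HTTP client. The JSON keys
# below follow the `Body(..., embed=True)` parameter names assumed in this file.
#
#     import requests
#
#     r = requests.post(
#         "http://localhost:8888/tokenize",
#         json={"text_input": "Hello world!", "return_ids": True},
#     )
#     print(r.json())  # {"tokens": [...], "tokens_ids": [...]}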
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
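# Illustrative note (not part of the original module): with `_LazyModule`, submodules
# are only imported on first attribute access, so e.g.
#
#     from transformers import MvpConfig  # cheap: triggers only configuration_mvp
#     from transformers import MvpModel   # imports modeling_mvp (and torch) on demand
#
# while the `TYPE_CHECKING` branch keeps the eager imports visible to static type
# checkers without paying the import cost at runtime.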
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return 1_00
@property
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__snake_case : Tuple = MultilingualCLIP(__magic_name__ )
__snake_case : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        __snake_case : Tuple = UNet2DConditionModel(**__magic_name__ )
return model
@property
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : Tuple = self.dummy_text_encoder
__snake_case : Dict = self.dummy_tokenizer
__snake_case : Dict = self.dummy_unet
__snake_case : int = self.dummy_movq
__snake_case : List[Any] = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__snake_case : Dict = DDIMScheduler(**__magic_name__ )
__snake_case : Any = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str:
"""simple docstring"""
__snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ )
# create init_image
__snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __snake_case : Optional[int] = Image.fromarray(np.uint8(__magic_name__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(__magic_name__ ).startswith("""mps""" ):
__snake_case : str = torch.manual_seed(__magic_name__ )
else:
__snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
__snake_case : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case : Dict = """cpu"""
__snake_case : Union[str, Any] = self.get_dummy_components()
__snake_case : List[str] = self.pipeline_class(**__magic_name__ )
__snake_case : Optional[Any] = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) )
__snake_case : List[str] = output.images
__snake_case : Any = pipe(
**self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0]
__snake_case : Optional[int] = image[0, -3:, -3:, -1]
__snake_case : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : int = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
__snake_case : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__snake_case : List[Any] = """A red cartoon frog, 4k"""
__snake_case : str = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__magic_name__ )
        __snake_case : Union[str, Any] = KandinskyImg2ImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.float16 )
__snake_case : Any = pipeline.to(__magic_name__ )
pipeline.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__snake_case , __snake_case : Optional[Any] = pipe_prior(
__magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__snake_case : List[str] = pipeline(
__magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , )
__snake_case : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
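# Illustrative sketch (not part of the original tests): the two-stage Kandinsky
# img2img flow exercised by the slow test above. Requires a GPU and network access.
if __name__ == "__main__":
    import torch
    from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
    from diffusers.utils import load_image

    prior = KandinskyPriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
    ).to("cuda")
    pipe = KandinskyImg2ImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
    ).to("cuda")

    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
    )
    # Stage 1: the prior maps the prompt to image embeddings.
    image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
    # Stage 2: the img2img pipeline edits the init image toward the prompt.
    image = pipe(
        "A red cartoon frog, 4k",
        image=init_image,
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        strength=0.2,
    ).images[0]
    image.save("frog.png")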
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
def __init__( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]=13 , lowercase_ : Optional[int]=30 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=3 , lowercase_ : List[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : Dict=4 , lowercase_ : List[str]=37 , lowercase_ : List[str]="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Any=0.1 , lowercase_ : List[str]=10 , lowercase_ : Any=0.02 , lowercase_ : List[str]=None , lowercase_ : str=2 , ):
lowercase_ : List[Any] = parent
lowercase_ : Any = batch_size
lowercase_ : Union[str, Any] = image_size
lowercase_ : Tuple = patch_size
lowercase_ : int = num_channels
lowercase_ : str = is_training
lowercase_ : Union[str, Any] = use_labels
lowercase_ : Optional[int] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : int = hidden_act
lowercase_ : str = hidden_dropout_prob
lowercase_ : Union[str, Any] = attention_probs_dropout_prob
lowercase_ : List[str] = type_sequence_label_size
lowercase_ : Optional[int] = initializer_range
lowercase_ : Tuple = scope
lowercase_ : List[Any] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ : int = (image_size // patch_size) ** 2
lowercase_ : Optional[int] = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : int ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Tuple = ViTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[int] ):
lowercase_ : int = ViTForMaskedImageModeling(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ : List[str] = 1
lowercase_ : List[str] = ViTForMaskedImageModeling(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : str = model(lowercase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Dict = self.type_sequence_label_size
lowercase_ : Any = ViTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ : int = 1
lowercase_ : Optional[Any] = ViTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : str = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Dict = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = config_and_inputs
lowercase_ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Any = ViTModelTester(self )
lowercase_ : str = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(lowercase_ )
lowercase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Optional[Any] = [*signature.parameters.keys()]
lowercase_ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = ViTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> str:
lowercase_ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Any = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : str = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : int = model(**lowercase_ )
# verify the logits
lowercase_ : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
        lowercase_ : List[str] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
        lowercase_ : Optional[int] = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(torch_device )
lowercase_ : str = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480 )
lowercase_ : Optional[int] = prepare_img()
lowercase_ : int = image_processor(images=lowercase_ , return_tensors="""pt""" )
        lowercase_ : Union[str, Any] = inputs.pixel_values.to(torch_device )
# forward pass
with torch.no_grad():
            lowercase_ : Any = model(lowercase_ , interpolate_pos_encoding=True )
# verify the logits
lowercase_ : List[Any] = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowercase_ )
        lowercase_ : Optional[Any] = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        lowercase_ : Any = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.float16 , device_map="""auto""" )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Optional[Any] = prepare_img()
lowercase_ : Union[str, Any] = image_processor(images=lowercase_ , return_tensors="""pt""" )
        lowercase_ : Union[str, Any] = inputs.pixel_values.to(torch_device )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ : Optional[Any] = model(lowercase_ )
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, removes punctuation from the sentences before scoring. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    """Hook used by pytest to register the shared diffusers command-line options."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Hook used by pytest to emit the optional test reports at the end of the run."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ",
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline (see the usage sketch after this class)."""
    def get_masked_index(self, input_ids) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError('''Unsupported framework''')
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids) -> None:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                '''fill-mask''', self.model.base_model_prefix, F"""No mask_token ({self.tokenizer.mask_token}) found on the input""", )
    def ensure_exactly_one_mask_token(self, model_inputs):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        # Keep the input ids around so that postprocessing can rebuild the sequences.
        model_outputs['''input_ids'''] = model_inputs['''input_ids''']
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['''input_ids'''][0]
        outputs = model_outputs['''logits''']

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p]), '''sequence''': sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True, )['''input_ids''']
                if len(input_ids) == 0:
                    logger.warning(
                        F"""The specified target token `{target}` does not exist in the model vocabulary. """
                        '''We cannot replace it with anything meaningful, ignoring it''')
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F"""The specified target token `{target}` does not exist in the model vocabulary. """
                    F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""")
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError('''At least one target must be provided when passed.''')
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params['''target_ids'''] = target_ids
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''', self.model.base_model_prefix, '''The tokenizer does not define a `mask_token`.''')
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
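# Hedged usage sketch for the pipeline class above (the checkpoint name and the
# predicted word are illustrative, not taken from this file):
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=2)
#   # -> a list of dicts with the "score", "token", "token_str" and "sequence"
#   #    keys built in postprocess above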
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    '''SentencePiece-based tokenizer for BARTpho (vinai/bartpho-syllable).'''
__UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : Tuple="</s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[str]="<s>" , lowerCAmelCase_ : Optional[int]="<unk>" , lowerCAmelCase_ : Optional[int]="<pad>" , lowerCAmelCase_ : Optional[int]="<mask>" , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
_A: Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
_A: Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
_A: int = vocab_file
_A: Optional[Any] = monolingual_vocab_file
_A: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_A: Dict = {}
_A: Dict = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCAmelCase_ ) not in self.fairseq_tokens_to_ids:
_A: str = cnt
cnt += 1
with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
_A: Optional[Any] = line.strip().split()[0]
_A: Union[str, Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCAmelCase_ ) not in self.fairseq_tokens_to_ids:
_A: Optional[int] = len(self.fairseq_tokens_to_ids )
_A: Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
"""simple docstring"""
_A: Optional[int] = self.__dict__.copy()
_A: str = None
_A: List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
_A: Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A: str = {}
_A: str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __magic_name__ ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A: Union[str, Any] = [self.cls_token_id]
_A: int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
_A: Optional[int] = [self.sep_token_id]
_A: Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Any = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : str ):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : int ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __magic_name__ ( self : int , lowerCAmelCase_ : Any ):
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def __magic_name__ ( self : int , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
_A: Optional[Any] = ''''''.join(lowerCAmelCase_ ).replace(lowerCAmelCase_ , ''' ''' ).strip()
return out_string
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A: Any = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Optional[int] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , '''wb''' ) as fi:
_A: Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCAmelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(lowerCAmelCase_ )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
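# Hedged usage sketch for BartphoTokenizer (the checkpoint name comes from the
# constants above; the exact token ids are illustrative):
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#   # a <s> ... </s> pair is added around the sequence by the
#   # special-token logic above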
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    # Round the requested pixel size up to the nearest multiple of scale_factor**2,
    # then convert it to the latent resolution used by the unet.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    # Resize, normalize to [-1, 1], and convert HWC uint8 -> 1CHW float tensor.
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('''RGB'''))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
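# Worked example (sketch) for downscale_height_and_width with the default
# scale_factor of 8: 512 % 64 == 0, so 512 maps to (512 // 64) * 8 = 64 on the
# latent grid, while 520 rounds up to 9 * 8 = 72. A (520, 512) request therefore
# yields latent dimensions (72, 64).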
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    '''Image-to-image pipeline for the Kandinsky 2.2 decoder (see the usage example above).'''
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
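    # Worked example (sketch): with num_inference_steps=100 and strength=0.3,
    # init_timestep is 30 and t_start is 70, so only the final 30 scheduler steps
    # run. The img2img strength therefore controls how much of the denoising
    # schedule is applied on top of the encoded input image.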
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=None ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase_ )}""" )
_A: Optional[int] = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
_A: Union[str, Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_A: Optional[int] = image
else:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase_ )
]
_A: Optional[Any] = torch.cat(lowerCAmelCase_ , dim=0 )
else:
_A: Optional[int] = self.movq.encode(lowerCAmelCase_ ).latent_dist.sample(lowerCAmelCase_ )
_A: int = self.movq.config.scaling_factor * init_latents
_A: Optional[Any] = torch.cat([init_latents] , dim=0 )
_A: Any = init_latents.shape
_A: Optional[Any] = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
# get latents
_A: Union[str, Any] = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: List[str] = init_latents
return latents
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_A: Any = torch.device(F"""cuda:{gpu_id}""" )
_A: int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any , lowerCAmelCase_ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_A: Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowerCAmelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A: int = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A: List[Any] = cpu_offload_with_hook(lowerCAmelCase_ , lowerCAmelCase_ , prev_module_hook=lowerCAmelCase_ )
# We'll offload the last model manually.
_A: Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase_ )
def __call__( self : Optional[Any] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 1_0_0 , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : float = 0.3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
_A: Any = self._execution_device
_A: Any = guidance_scale > 1.0
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Any = torch.cat(lowerCAmelCase_ , dim=0 )
_A: int = image_embeds.shape[0]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Dict = torch.cat(lowerCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
_A: Any = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: str = negative_image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase_ )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: List[str] = [image]
if not all(isinstance(lowerCAmelCase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(lowerCAmelCase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_A: List[str] = torch.cat([prepare_image(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in image] , dim=0 )
_A: Tuple = image.to(dtype=image_embeds.dtype , device=lowerCAmelCase_ )
_A: Optional[Any] = self.movq.encode(lowerCAmelCase_ )['''latents''']
_A: Optional[int] = latents.repeat_interleave(lowerCAmelCase_ , dim=0 )
self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_ )
_A , _A: List[Any] = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_A , _A: Optional[int] = downscale_height_and_width(lowerCAmelCase_ , lowerCAmelCase_ , self.movq_scale_factor )
_A: Any = self.prepare_latents(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , image_embeds.dtype , lowerCAmelCase_ , lowerCAmelCase_ )
for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_A: Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A: str = {'''image_embeds''': image_embeds}
_A: Optional[int] = self.unet(
sample=lowerCAmelCase_ , timestep=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , added_cond_kwargs=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
if do_classifier_free_guidance:
_A , _A: str = noise_pred.split(latents.shape[1] , dim=1 )
_A , _A: int = noise_pred.chunk(2 )
_A , _A: int = variance_pred.chunk(2 )
_A: Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A: List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A , _A: Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A: Any = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ , )[0]
# post-processing
_A: Tuple = self.movq.decode(lowerCAmelCase_ , force_not_quantize=lowerCAmelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_A: int = image * 0.5 + 0.5
_A: Any = image.clamp(0 , 1 )
_A: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A: Union[str, Any] = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune the scripts folder and hidden/underscore directories in place so
        # os.walk does not descend into them.
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('''./''')


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ''''''
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(''' ''', '''%20''')
        filename = os.path.splitext(filename.replace('''_''', ''' ''').title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md('.')
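# Example (sketch) of the markdown this script prints for a tree containing
# searches/binary_search.py and sorts/bubble_sort.py:
#
#   ## Searches
#     * [Binary Search](searches/binary_search.py)
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)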
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
lowerCAmelCase_ = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
lowerCAmelCase_ = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            predictions, references, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Iteratively solve Ax = b with the Jacobi method, starting from init_val."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)

    if cols2 != 1:
        msg = f'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ValueError unless every diagonal entry dominates its row; return True."""
    rows, cols = table.shape
    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
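# Hedged usage sketch for jacobi_iteration_method (values are illustrative; the
# coefficient matrix below is strictly diagonally dominant, so the checks pass):
#
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   init_val = [0.5, -0.5, -0.5]
#   jacobi_iteration_method(coefficient, constant, init_val, iterations=3)
#   # -> three floats approximating the solution of the linear system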
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    # Recursively halve the (sorted) search space until the item is found or
    # the slice is empty.
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
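# Illustrative calls (sketch; the input list must already be sorted):
#
#   binary_search([1, 3, 5, 7], 5)  # -> True
#   binary_search([1, 3, 5, 7], 4)  # -> False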
if __name__ == "__main__":
__a = input("Enter numbers separated by comma:\n").strip()
__a = [int(item.strip()) for item in user_input.split(",")]
__a = int(input("Enter the number to be found in the list:\n").strip())
__a = "" if binary_search(sequence, target) else "not "
print(F"{target} was {not_str}found in {sequence}")
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens to the streamer."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Streamer that prints generated text to stdout as soon as entire words are formed."""
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n"""):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(""" """) + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = """"""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)
    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="""""" if not stream_end else None)
    def _is_chinese_char(self, cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a downstream iterator."""
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)
def __iter__( self : List[str] ):
return self
    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
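# Hedged usage sketch for the streamer classes above (model and prompt are
# illustrative; a TextIteratorStreamer would instead be consumed from another thread):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   streamer = TextStreamer(tokenizer, skip_prompt=True)
#   inputs = tokenizer("An example prompt", return_tensors="pt")
#   model.generate(**inputs, streamer=streamer, max_new_tokens=20)  # prints as it decodes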
'''simple docstring'''
import datasets
from .evaluate import evaluate
_lowerCamelCase : List[str] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_lowerCamelCase : List[Any] = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_lowerCamelCase : Dict = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def A (self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def A (self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A = evaluate(dataset=_lowerCAmelCase , predictions=_lowerCAmelCase )
return score
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    '''Image classification pipeline assigning labels to images (see the usage sketch after this class).'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, """vision""")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
'''simple docstring'''
import argparse
import os
import re
_snake_case : List[str] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_snake_case : Any = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_snake_case : List[str] = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = False ):
'''simple docstring'''
with open(UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f:
_a = f.read()
_a = content.split('''\n''' )
_a = []
_a = 0
while line_idx < len(UpperCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_a = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
_a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_a = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_a = sorted(UpperCamelCase , key=lambda UpperCamelCase : _re_identifier.search(UpperCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(UpperCamelCase ) )
elif "\n".join(UpperCamelCase ) != content:
return True
def snake_case_ (UpperCamelCase : bool = False ):
'''simple docstring'''
_a = [os.path.join(UpperCamelCase , UpperCamelCase ) for f in os.listdir(UpperCamelCase ) if f.endswith('''.py''' )]
_a = [sort_auto_mapping(UpperCamelCase , overwrite=UpperCamelCase ) for fname in fnames]
if not overwrite and any(UpperCamelCase ):
_a = [f for f, d in zip(UpperCamelCase , UpperCamelCase ) if d]
raise ValueError(
f'The following files have auto mappings that need sorting: {", ".join(UpperCamelCase )}. Run `make style` to fix'
''' this.''' )
if __name__ == "__main__":
_snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_snake_case : Tuple = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
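# Self-contained sketch (added) of the sort key used in the mapping sorter above:
# blocks are ordered by the first quoted identifier that _re_identifier captures.
def _sort_blocks_sketch(blocks):
    import re
    re_id = re.compile(r'\s*\(\s*"(\S[^"]+)"')
    return sorted(blocks, key=lambda b: re_id.search(b).groups()[0])

# _sort_blocks_sketch(['    ("bert", BertConfig),', '    ("albert", AlbertConfig),'])
# -> albert entry first, then bert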
| 179 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ (UpperCamelCase : list[int] ):
'''simple docstring'''
if not nums:
return 0
_a = nums[0]
_a = 0
for num in nums[1:]:
_a , _a = (
max_excluding + num,
max(UpperCamelCase , UpperCamelCase ),
)
return max(UpperCamelCase , UpperCamelCase )
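# De-obfuscated sketch (added; the readable names are mine, not the source's) of
# the recurrence above: track the best sum that includes the current element and
# the best sum that excludes it, then take the larger of the two at the end.
def max_non_adjacent_sum_sketch(nums: list) -> int:
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)

# max_non_adjacent_sum_sketch([3, 2, 7, 10]) == 13  (picks 3 and 10)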
if __name__ == "__main__":
import doctest
doctest.testmod()
| 179 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """sentencepiece.model"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
}
lowerCamelCase__ = {
"""google/rembert""": 256,
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Tuple =VOCAB_FILES_NAMES
__lowerCamelCase : str =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : str , __lowercase : List[str] , __lowercase : Dict=False , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : int="[CLS]" , __lowercase : List[str]="[SEP]" , __lowercase : Dict="[UNK]" , __lowercase : str="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : Tuple="[CLS]" , __lowercase : str="[MASK]" , **__lowercase : Dict , ):
'''simple docstring'''
super().__init__(
do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = spm.SentencePieceProcessor()
self.sp_model.Load(__lowercase )
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return len(self.sp_model )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
'''simple docstring'''
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self : Tuple , __lowercase : Any ):
'''simple docstring'''
__a = d
__a = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self : str , __lowercase : Optional[int] , __lowercase : Optional[int]=False ):
'''simple docstring'''
__a = self.sp_model.EncodeAsPieces(__lowercase )
return pieces
def UpperCamelCase_ ( self : str , __lowercase : Optional[Any] ):
'''simple docstring'''
return self.sp_model.PieceToId(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : Optional[Any] ):
'''simple docstring'''
return self.sp_model.IdToPiece(__lowercase )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Tuple ):
'''simple docstring'''
__a = self.sp_model.decode_pieces(__lowercase )
return out_string
def UpperCamelCase_ ( self : List[str] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : int , __lowercase : List[int] , __lowercase : Optional[List[int]] = None , __lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1]
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowercase ):
logger.error("""Vocabulary path ({}) should be a directory""".format(__lowercase ) )
return
__a = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
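# Self-contained sketch (added) of the special-token layout implemented in
# build_inputs_with_special_tokens above: [CLS] a [SEP] for one sequence,
# [CLS] a [SEP] b [SEP] for a pair. The token ids below are placeholders,
# not RemBERT's real ids.
def build_inputs_sketch(ids_a, ids_b=None, cls_id=0, sep_id=1):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

# build_inputs_sketch([7, 8], [9]) -> [0, 7, 8, 1, 9, 1]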
| 302 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : torch.FloatTensor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ):
@register_to_config
def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
__a = num_attention_heads
__a = attention_head_dim
__a = num_attention_heads * attention_head_dim
__a = additional_embeddings
__a = time_embed_dim or inner_dim
__a = embedding_proj_dim or embedding_dim
__a = clip_embed_dim or embedding_dim
__a = Timesteps(__lowercase , __lowercase , 0 )
__a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
if embedding_proj_norm_type is None:
__a = None
elif embedding_proj_norm_type == "layer":
__a = nn.LayerNorm(__lowercase )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
__a = nn.Linear(__lowercase , __lowercase )
if encoder_hid_proj_type is None:
__a = None
elif encoder_hid_proj_type == "linear":
__a = nn.Linear(__lowercase , __lowercase )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
__a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) )
if added_emb_type == "prd":
__a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) )
elif added_emb_type is None:
__a = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
__a = nn.ModuleList(
[
BasicTransformerBlock(
__lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , )
for d in range(__lowercase )
] )
if norm_in_type == "layer":
__a = nn.LayerNorm(__lowercase )
elif norm_in_type is None:
__a = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
__a = nn.LayerNorm(__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
__a = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
__a = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase )
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = {}
def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ):
if hasattr(__lowercase , """set_processor""" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowercase , __lowercase , __lowercase )
return processors
def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
__a = len(self.attn_processors.keys() )
if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ):
if hasattr(__lowercase , """set_processor""" ):
if not isinstance(__lowercase , __lowercase ):
module.set_processor(__lowercase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ):
'''simple docstring'''
__a = hidden_states.shape[0]
__a = timestep
if not torch.is_tensor(__lowercase ):
__a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0:
__a = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device )
__a = self.time_proj(__lowercase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__a = timesteps_projected.to(dtype=self.dtype )
__a = self.time_embedding(__lowercase )
if self.embedding_proj_norm is not None:
__a = self.embedding_proj_norm(__lowercase )
__a = self.embedding_proj(__lowercase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__a = self.encoder_hidden_states_proj(__lowercase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
__a = self.proj_in(__lowercase )
__a = self.positional_embedding.to(hidden_states.dtype )
__a = []
__a = 0
if encoder_hidden_states is not None:
additional_embeds.append(__lowercase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__a = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__a = hidden_states[:, None, :]
__a = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 )
additional_embeds.append(__lowercase )
__a = torch.cat(
__lowercase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__a = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__a = F.pad(
__lowercase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__a = hidden_states + positional_embeddings
if attention_mask is not None:
__a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
__a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 )
__a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__a = self.norm_in(__lowercase )
for block in self.transformer_blocks:
__a = block(__lowercase , attention_mask=__lowercase )
__a = self.norm_out(__lowercase )
if self.prd_embedding is not None:
__a = hidden_states[:, -1]
else:
__a = hidden_states[:, additional_embeddings_len:]
__a = self.proj_to_clip_embeddings(__lowercase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : Tuple ):
'''simple docstring'''
__a = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
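# Self-contained sketch (added) of the causal-mask construction from __init__
# above: fill with -1e4, then triu_(1) zeroes the diagonal and below, leaving a
# large negative bias only where a query would attend to a future position.
import torch

def causal_bias_sketch(seq_len: int) -> torch.Tensor:
    mask = torch.full((seq_len, seq_len), -10000.0)
    mask.triu_(1)
    return mask[None, ...]  # broadcastable over the batch, as registered above

# causal_bias_sketch(3)[0] ->
# tensor([[    0., -10000., -10000.],
#         [    0.,      0., -10000.],
#         [    0.,      0.,      0.]])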
| 302 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_lowerCamelCase : Any = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
_lowerCamelCase : Any = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_lowerCamelCase : Optional[int] = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_lowerCamelCase : str = sorted(arg_to_scheduler.keys())
_lowerCamelCase : Dict = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class lowercase ( pl.LightningModule ):
def __init__( self : int , _UpperCamelCase : argparse.Namespace , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]="base" , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Optional[Any]=None , **_UpperCamelCase : Dict , ) -> List[Any]:
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = Path(self.hparams.output_dir )
SCREAMING_SNAKE_CASE = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=_UpperCamelCase , **_UpperCamelCase , )
else:
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , _UpperCamelCase , _UpperCamelCase ):
assert hasattr(self.config , _UpperCamelCase ), F"model config doesn't have a `{p}` attribute"
setattr(self.config , _UpperCamelCase , getattr(self.hparams , _UpperCamelCase ) )
if tokenizer is None:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_UpperCamelCase , )
else:
SCREAMING_SNAKE_CASE = tokenizer
SCREAMING_SNAKE_CASE = MODEL_MODES[mode]
if model is None:
SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_UpperCamelCase , )
else:
SCREAMING_SNAKE_CASE = model
def __snake_case( self : Optional[Any] , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = arg_to_scheduler[self.hparams.lr_scheduler]
SCREAMING_SNAKE_CASE = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
SCREAMING_SNAKE_CASE = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def __snake_case( self : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model
SCREAMING_SNAKE_CASE = ["bias", "LayerNorm.weight"]
SCREAMING_SNAKE_CASE = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
SCREAMING_SNAKE_CASE = Adafactor(
_UpperCamelCase , lr=self.hparams.learning_rate , scale_parameter=_UpperCamelCase , relative_step=_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = AdamW(
_UpperCamelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
SCREAMING_SNAKE_CASE = optimizer
SCREAMING_SNAKE_CASE = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __snake_case( self : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
return self.validation_step(_UpperCamelCase , _UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return self.validation_end(_UpperCamelCase )
def __snake_case( self : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
SCREAMING_SNAKE_CASE = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __snake_case( self : Any , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
if stage == "test":
SCREAMING_SNAKE_CASE = len(self.test_dataloader().dataset )
else:
SCREAMING_SNAKE_CASE = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=_UpperCamelCase )
SCREAMING_SNAKE_CASE = len(self.train_dataloader().dataset )
def __snake_case( self : Dict , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : bool = False ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError("You must implement this for your task" )
def __snake_case( self : Any ) -> str:
'''simple docstring'''
return self.train_loader
def __snake_case( self : str ) -> int:
'''simple docstring'''
return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=_UpperCamelCase )
def __snake_case( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , "cached_{}_{}_{}".format(
_UpperCamelCase , list(filter(_UpperCamelCase , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __snake_case( self : Optional[int] , _UpperCamelCase : Dict[str, Any] ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.output_dir.joinpath("best_tfmr" )
SCREAMING_SNAKE_CASE = self.step_count
self.model.save_pretrained(_UpperCamelCase )
self.tokenizer.save_pretrained(_UpperCamelCase )
@staticmethod
def __snake_case( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] ) -> Any:
'''simple docstring'''
parser.add_argument(
"--model_name_or_path" , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--config_name" , default="" , type=_UpperCamelCase , help="Pretrained config name or path if not the same as model_name" )
parser.add_argument(
"--tokenizer_name" , default=_UpperCamelCase , type=_UpperCamelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument(
"--cache_dir" , default=str(Path(_UpperCamelCase ).parent / "test_run" / "cache" ) , type=_UpperCamelCase , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
parser.add_argument(
"--encoder_layerdrop" , type=_UpperCamelCase , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--decoder_layerdrop" , type=_UpperCamelCase , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--dropout" , type=_UpperCamelCase , help="Dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--attention_dropout" , type=_UpperCamelCase , help="Attention dropout probability (Optional). Goes into model.config" , )
parser.add_argument("--learning_rate" , default=5e-5 , type=_UpperCamelCase , help="The initial learning rate for Adam." )
parser.add_argument(
"--lr_scheduler" , default="linear" , choices=_UpperCamelCase , metavar=_UpperCamelCase , type=_UpperCamelCase , help="Learning rate scheduler" , )
parser.add_argument("--weight_decay" , default=0.0 , type=_UpperCamelCase , help="Weight decay if we apply some." )
parser.add_argument("--adam_epsilon" , default=1e-8 , type=_UpperCamelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--warmup_steps" , default=0 , type=_UpperCamelCase , help="Linear warmup over warmup_steps." )
parser.add_argument("--num_workers" , default=4 , type=_UpperCamelCase , help="kwarg passed to DataLoader" )
parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=_UpperCamelCase )
parser.add_argument("--train_batch_size" , default=32 , type=_UpperCamelCase )
parser.add_argument("--eval_batch_size" , default=32 , type=_UpperCamelCase )
parser.add_argument("--adafactor" , action="store_true" )
class lowercase ( pl.Callback ):
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowercase ( pl.Callback ):
def __snake_case( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_UpperCamelCase )
class lowercase ( pl.Callback ):
def __snake_case( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = trainer.lr_schedulers[0]["scheduler"]
SCREAMING_SNAKE_CASE = {F"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_UpperCamelCase )
def __snake_case( self : str , _UpperCamelCase : pl.Trainer , _UpperCamelCase : pl.LightningModule ) -> List[str]:
'''simple docstring'''
rank_zero_info("***** Validation results *****" )
SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log results
for key in sorted(_UpperCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(_UpperCamelCase , str(metrics[key] ) ) )
def __snake_case( self : List[Any] , _UpperCamelCase : pl.Trainer , _UpperCamelCase : pl.LightningModule ) -> Dict:
'''simple docstring'''
rank_zero_info("***** Test results *****" )
SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log and save results to file
SCREAMING_SNAKE_CASE = os.path.join(pl_module.hparams.output_dir , "test_results.txt" )
with open(_UpperCamelCase , "w" ) as writer:
for key in sorted(_UpperCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(_UpperCamelCase , str(metrics[key] ) ) )
writer.write("{} = {}\n".format(_UpperCamelCase , str(metrics[key] ) ) )
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ):
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"--output_dir" , default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "model_checkpoints" ) , type=UpperCAmelCase__ , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=UpperCAmelCase__ , default="O2" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=UpperCAmelCase__ )
parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=UpperCAmelCase__ , help="Max gradient norm" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
parser.add_argument(
"--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=UpperCAmelCase__ , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--seed" , type=UpperCAmelCase__ , default=4_2 , help="random seed for initialization" )
parser.add_argument(
"--data_dir" , default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-train-data" ) , type=UpperCAmelCase__ , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )
def __lowerCamelCase (UpperCAmelCase__ : BaseTransformer , UpperCAmelCase__ : argparse.Namespace , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[str]=[] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Union[str, Any] , ):
pl.seed_everything(args.seed )
# init model
SCREAMING_SNAKE_CASE = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=UpperCAmelCase__ )
# add custom checkpoints
if checkpoint_callback is None:
SCREAMING_SNAKE_CASE = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(UpperCAmelCase__ )
if logging_callback is None:
SCREAMING_SNAKE_CASE = LoggingCallback()
SCREAMING_SNAKE_CASE = {}
if args.fpaa:
SCREAMING_SNAKE_CASE = 1_6
if args.gpus > 1:
SCREAMING_SNAKE_CASE = "auto"
SCREAMING_SNAKE_CASE = "ddp"
SCREAMING_SNAKE_CASE = args.accumulate_grad_batches
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = "auto"
SCREAMING_SNAKE_CASE = pl.Trainer.from_argparse_args(
UpperCAmelCase__ , weights_summary=UpperCAmelCase__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=UpperCAmelCase__ , val_check_interval=1 , num_sanity_val_steps=2 , **UpperCAmelCase__ , )
if args.do_train:
trainer.fit(UpperCAmelCase__ )
else:
print("RAG modeling tests with new set functions successfuly executed!" )
return trainer
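# Self-contained sketch (added) of the weight-decay grouping in the
# configure_optimizers logic above: biases and LayerNorm weights are exempted
# from decay, everything else gets the configured value.
def param_groups_sketch(model, weight_decay: float):
    no_decay = ("bias", "LayerNorm.weight")
    return [
        {
            "params": [p for n, p in model.named_parameters()
                       if not any(nd in n for nd in no_decay)],
            "weight_decay": weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters()
                       if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]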
| 206 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowercase :
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : int
lowercase__ : int
lowercase__ : float
lowercase__ : float
lowercase__ : Tuple[int]
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __snake_case( self : int ) -> str:
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __snake_case( self : Tuple ) -> List[str]:
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __snake_case( self : Any ) -> torch.Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(_UpperCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def __snake_case( self : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = self.shape
SCREAMING_SNAKE_CASE = int(np.prod(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = self.get_image_coords()
SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE = self.get_camera_rays(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rays.view(_UpperCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __snake_case( self : Optional[int] , _UpperCamelCase : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE = coords.view(_UpperCamelCase , -1 , 2 )
SCREAMING_SNAKE_CASE = self.resolution()
SCREAMING_SNAKE_CASE = self.fov()
SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE = fracs.view(_UpperCamelCase , -1 , 2 )
SCREAMING_SNAKE_CASE = (
self.z.view(_UpperCamelCase , 1 , 3 )
+ self.x.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(_UpperCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_UpperCamelCase , *_UpperCamelCase , 2 , 3 )
def __snake_case( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int ) -> "DifferentiableProjectiveCamera":
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_UpperCamelCase , height=_UpperCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def __lowerCamelCase (UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
SCREAMING_SNAKE_CASE = np.array([np.sin(UpperCAmelCase__ ), np.cos(UpperCAmelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE = -z * 4
SCREAMING_SNAKE_CASE = np.array([np.cos(UpperCAmelCase__ ), -np.sin(UpperCAmelCase__ ), 0.0] )
SCREAMING_SNAKE_CASE = np.cross(UpperCAmelCase__ , UpperCAmelCase__ )
origins.append(UpperCAmelCase__ )
xs.append(UpperCAmelCase__ )
ys.append(UpperCAmelCase__ )
zs.append(UpperCAmelCase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , width=UpperCAmelCase__ , height=UpperCAmelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(UpperCAmelCase__ )) , )
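# De-obfuscated sketch (added; names and argument order reflect my reading of
# the obfuscated calls above) of one step of the orbit loop: the camera sits at
# -4*z looking along z, x is tangent to the circle, and y completes the frame
# via the cross product.
import numpy as np

def orbit_frame_sketch(theta: float):
    z = np.array([np.sin(theta), np.cos(theta), -0.5])
    z /= np.sqrt(np.sum(z**2))
    origin = -4.0 * z
    x = np.array([np.cos(theta), -np.sin(theta), 0.0])
    y = np.cross(z, x)
    return origin, x, y, z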
| 206 | 1 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_UpperCAmelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , **A_ ) -> Tuple:
"""simple docstring"""
super().__init__(**A_ )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , A_ , **A_ ) -> str:
"""simple docstring"""
return super().__call__(A_ , **A_ )
def __UpperCamelCase ( self , **A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCamelCase ( self , A_ , A_=None , A_="This is a sound of {}." ) -> Tuple:
"""simple docstring"""
if isinstance(A_ , A_ ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
UpperCamelCase = requests.get(A_ ).content
else:
with open(A_ , 'rb' ) as f:
UpperCamelCase = f.read()
if isinstance(A_ , A_ ):
UpperCamelCase = ffmpeg_read(A_ , self.feature_extractor.sampling_rate )
if not isinstance(A_ , np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
UpperCamelCase = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(A_ ) for x in candidate_labels]
UpperCamelCase = self.tokenizer(A_ , return_tensors=self.framework , padding=A_ )
UpperCamelCase = [text_inputs]
return inputs
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = model_inputs.pop('candidate_labels' )
UpperCamelCase = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , A_ ):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**A_ , **A_ )
UpperCamelCase = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def __UpperCamelCase ( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = model_outputs.pop('candidate_labels' )
UpperCamelCase = model_outputs['logits'][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=0 )
UpperCamelCase = probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
UpperCamelCase = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(A_ , A_ ) , key=lambda A_ : -x[0] )
]
return result
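# Sketch (added) of the zero-shot prompt construction done in preprocess above:
# each candidate label is slotted into the hypothesis template before the text
# encoder scores it against the audio embedding.
def build_hypotheses_sketch(candidate_labels, template="This is a sound of {}."):
    return [template.format(label) for label in candidate_labels]

# build_hypotheses_sketch(["dog", "vacuum cleaner"])
# -> ['This is a sound of dog.', 'This is a sound of vacuum cleaner.']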
| 222 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
UpperCamelCase = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = 'sgugger/tiny-distilbert-classification'
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , only_pretrain_model=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , torchscript=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , fpaa=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
UpperCamelCase = AutoConfig.from_pretrained(A_ )
# set architectures equal to `None`
UpperCamelCase = None
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A_ , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
UpperCamelCase = AutoConfig.from_pretrained(A_ )
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tinier_bart'
UpperCamelCase = AutoConfig.from_pretrained(A_ )
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
UpperCamelCase = AutoConfig.from_pretrained(A_ )
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tinier_bart'
UpperCamelCase = AutoConfig.from_pretrained(A_ )
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , save_to_csv=A_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(A_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(A_ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(A_ , 'train_time.csv' ) , env_info_csv_file=os.path.join(A_ , 'env.csv' ) , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , 'env.csv' ) ).exists() )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(A_ ):
self.assertTrue(hasattr(A_ , 'sequential' ) )
self.assertTrue(hasattr(A_ , 'cumulative' ) )
self.assertTrue(hasattr(A_ , 'current' ) )
self.assertTrue(hasattr(A_ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A_ , 'log.txt' ) , log_print=A_ , trace_memory_line_by_line=A_ , multi_process=A_ , )
UpperCamelCase = PyTorchBenchmark(A_ )
UpperCamelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A_ , 'log.txt' ) ).exists() )
| 222 | 1 |
from collections.abc import Callable
import numpy as np
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> np.ndarray:
"""simple docstring"""
snake_case__ : List[Any] = int(np.ceil((x_end - xa) / step_size ) )
snake_case__ : List[str] = np.zeros((n + 1,) )
snake_case__ : Any = ya
snake_case__ : List[Any] = xa
for k in range(__lowerCAmelCase ):
snake_case__ : List[Any] = y[k] + step_size * ode_func(__lowerCAmelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
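# De-obfuscated sketch (added; readable names are mine) of the explicit Euler
# routine above, plus a quick check: integrating y' = y from 0 to 1 with
# y(0) = 1 should approach e ~ 2.71828 as the step size shrinks.
import numpy as np

def explicit_euler_sketch(ode_func, y0, x0, x_end, step_size):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

# explicit_euler_sketch(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)[-1] -> ~2.7048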
| 44 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Dict = TransfoXLTokenizer
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : List[str] = False
def __lowerCamelCase ( self :Union[str, Any] ):
super().setUp()
snake_case__ : Optional[int] = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCamelCase ( self :int ,**__lowercase :Any ):
snake_case__ : str = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname ,**__lowercase )
def __lowerCamelCase ( self :int ,__lowercase :Optional[int] ):
snake_case__ : int = '''<unk> UNwanted , running'''
snake_case__ : List[Any] = '''<unk> unwanted, running'''
return input_text, output_text
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Optional[Any] = TransfoXLTokenizer(vocab_file=self.vocab_file ,lower_case=__lowercase )
snake_case__ : Tuple = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(__lowercase ,['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[0, 4, 8, 7] )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[Any] = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Any = TransfoXLTokenizer(lower_case=__lowercase )
snake_case__ : List[str] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
snake_case__ : Union[str, Any] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(__lowercase ) ,__lowercase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ) ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Optional[Any] = len(__lowercase )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' ,1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowercase ) ,original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) ,[1] )
self.assertEqual(tokenizer.decode([1] ) ,'''new1''' )
| 44 | 1 |
'''simple docstring'''
import math
import qiskit
def __lowerCamelCase ( A__ = 1 , A__ = 1 , A__ = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(A__ , A__ )
or isinstance(A__ , A__ )
or isinstance(A__ , A__ )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(A__ ) != input_a)
or (math.floor(A__ ) != input_a)
or (math.floor(A__ ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
UpperCamelCase = qiskit.QuantumRegister(4 , 'qr' )
UpperCamelCase = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
UpperCamelCase = [input_a, input_a, carry_in]
UpperCamelCase = qiskit.QuantumCircuit(A__ , A__ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(A__ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(A__ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(A__ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , A__ ) # measure the last two qbits
UpperCamelCase = qiskit.Aer.get_backend('aer_simulator' )
UpperCamelCase = qiskit.execute(A__ , A__ , shots=1_000 )
return job.result().get_counts(A__ )
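# Classical reference (added) for sanity-checking the circuit above: a full
# adder computes sum = a ^ b ^ cin and carry_out = majority(a, b, cin), which
# is what the Toffoli/CNOT ladder leaves on the measured qubits 2 and 3.
def full_adder_sketch(a: int, b: int, cin: int) -> tuple:
    s = a ^ b ^ cin
    carry_out = (a & b) | (a & cin) | (b & cin)
    return s, carry_out

# full_adder_sketch(1, 1, 1) == (1, 1): both measured bits come out 1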
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 28 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowerCamelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(_a )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
requires_backends(self , 'decord' )
self.check_model_type(UpperCamelCase__ )
def A ( self : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None ):
"""simple docstring"""
UpperCamelCase = {}
if frame_sampling_rate is not None:
UpperCamelCase = frame_sampling_rate
if num_frames is not None:
UpperCamelCase = num_frames
UpperCamelCase = {}
if top_k is not None:
UpperCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] , UpperCamelCase__ : Union[str, List[str]] , **UpperCamelCase__ : Dict ):
"""simple docstring"""
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=1 ):
"""simple docstring"""
if num_frames is None:
UpperCamelCase = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
UpperCamelCase = BytesIO(requests.get(UpperCamelCase__ ).content )
UpperCamelCase = VideoReader(UpperCamelCase__ )
videoreader.seek(0 )
UpperCamelCase = 0
UpperCamelCase = num_frames * frame_sampling_rate - 1
UpperCamelCase = np.linspace(UpperCamelCase__ , UpperCamelCase__ , num=UpperCamelCase__ , dtype=np.intaa )
UpperCamelCase = videoreader.get_batch(UpperCamelCase__ ).asnumpy()
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=self.framework )
return model_inputs
def A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model(**UpperCamelCase__ )
return model_outputs
def A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase = model_outputs.logits.softmax(-1 )[0]
UpperCamelCase , UpperCamelCase = probs.topk(UpperCamelCase__ )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCamelCase = scores.tolist()
UpperCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
| 28 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up the VRAM after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
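
# Minimal SAG usage sketch (checkpoint id and scales come from the tests above; the
# prompt and sag_scale value are hypothetical). Wrapped in a function so nothing runs
# at import time.
def _sag_usage_sketch():
    pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    pipe = pipe.to("cuda")
    return pipe(
        "a photo of an astronaut", sag_scale=0.75, guidance_scale=7.5, num_inference_steps=20
    ).images[0]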
| 371 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
    #                  stream_logs=True)
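
    # Optional sanity check that the remote env is ready (sketch; assumes the `cluster`
    # object above and runhouse's list-of-commands `run` API):
    # cluster.run(['python -c "import torch; print(torch.cuda.is_available())"'])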
| 103 | 0 |
def aliquot_sum(input_num: int) -> int:
    """Return the sum of all proper divisors of `input_num` (its aliquot sum)."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
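
    # Worked example: 28 is a perfect number, so its proper divisors
    # 1 + 2 + 4 + 7 + 14 sum back to 28.
    assert aliquot_sum(28) == 28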
| 67 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(PretrainedConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
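
# Standalone sketch of the mask-weighted mean pooling used in forward() above
# (illustrative shapes; padding positions contribute nothing to the average):
def _mean_pool_sketch():
    embs = torch.randn(2, 5, 8)                                      # (batch, seq, hidden)
    mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]).float()  # (batch, seq)
    pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
    return pooled.shape  # torch.Size([2, 8])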
| 67 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Mixing pretokenized inputs with byte-level BPE is not meaningful, so this is skipped.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True, )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
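
# Aside: the "\u0120" strings in the fixtures above are byte-level BPE's space marker
# ("Ġ"). With the full pretrained vocab the same convention looks like this (sketch):
def _byte_level_bpe_sketch():
    tok = GPT2Tokenizer.from_pretrained("gpt2")  # full vocab, not the tiny fixture above
    return tok.tokenize("lower newer")  # tokens that follow a space start with "Ġ"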
| 354 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
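
# For reference, frequency_stride/time_stride control how many patch_size x patch_size
# patches AST carves out of the (num_mel_bins x max_length) spectrogram. Sketch of the
# sliding-window count (assumes the conv-style formula floor((dim - patch) / stride) + 1):
def _ast_num_patches(num_mel_bins=128, max_length=1024, patch_size=16, frequency_stride=10, time_stride=10):
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 12
    time_out = (max_length - patch_size) // time_stride + 1             # 101
    return frequency_out * time_out                                     # 1212 patches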
| 173 | 0 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads=16, attention_head_dim=88, in_channels=None, out_channels=None, num_layers=1, dropout=0.0, norm_num_groups=32, cross_attention_dim=None, attention_bias=False, sample_size=None, activation_fn="geglu", norm_elementwise_affine=True, double_self_attention=True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ])
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict=True, ):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
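
# Shape-flow sketch for the temporal attention above (illustrative sizes): the reshapes
# move the frame axis into the sequence dimension so attention runs across time at each
# spatial location.
def _temporal_reshape_sketch():
    b, f, c, h, w = 2, 8, 4, 16, 16
    x = torch.randn(b * f, c, h, w)                               # (batch*frames, C, H, W)
    x = x[None, :].reshape(b, f, c, h, w).permute(0, 2, 1, 3, 4)  # (B, C, F, H, W)
    x = x.permute(0, 3, 4, 2, 1).reshape(b * h * w, f, c)         # one length-F sequence per pixel
    return x.shape  # torch.Size([512, 8, 4])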
| 82 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list:
    """Sieve of Eratosthenes: return all primes below `max_number`."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p (p, q distinct primes) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
    print(f"{solution() = }")
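
    # The two-pointer scan in solution() works because, in log space, the hybrid
    # condition p**q * q**p <= base**degree becomes
    #     q * log2(p) + p * log2(q) <= degree * log2(base),
    # and the left-hand side grows with both primes. Toy check:
    def _is_hybrid_within(p: int, q: int, base: int, degree: int) -> bool:
        return q * log2(p) + p * log2(q) <= degree * log2(base)

    assert _is_hybrid_within(2, 3, 800, 800)  # 2**3 * 3**2 = 72, comfortably within bound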
| 82 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
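
# Usage sketch: `generate` composes warpers/processors the same way the list tests do
# (dummy logits; the temperature/top-k values are illustrative):
def _processor_list_sketch():
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=50)])
    scores = jnp.zeros((1, 100))
    return processors(None, scores, cur_len=1)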
| 350 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter (RBJ Audio EQ Cookbook biquad)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Creates a peak (bell) filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
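
# Usage sketch (assumes IIRFilter.process(sample) from audio_filters.iir_filter):
def _filter_usage_sketch():
    filt = make_lowpass(frequency=1000, samplerate=48000)
    # Feed a 5 kHz sine through the 1 kHz low-pass; the output is strongly attenuated.
    return [filt.process(sin(tau * 5000 * n / 48000)) for n in range(64)]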
| 262 | 0 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise the PyTorch model. To convert a checkpoint that uses absolute position
    # embeddings, leave reset_position_index_per_cell unset (False).
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to False.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
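
    # Programmatic equivalent of the CLI above (sketch; every path is a placeholder):
    # convert_tf_checkpoint_to_pytorch(
    #     task="WTQ",
    #     reset_position_index_per_cell=True,
    #     tf_checkpoint_path="/path/to/model.ckpt",
    #     tapas_config_file="/path/to/tapas_config.json",
    #     pytorch_dump_path="/path/to/output",
    # )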
| 219 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 219 | 1 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y = a * x + b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler)
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase_ ="""/tmp/accelerate/state_checkpointing"""
UpperCamelCase_ =DummyModel()
UpperCamelCase_ =torch.optim.Adam(params=model.parameters(), lr=1e-3)
UpperCamelCase_ =torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCamelCase_ , UpperCamelCase_ =dummy_dataloaders()
UpperCamelCase_ =ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCamelCase_ =Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCamelCase_ , UpperCamelCase_ =accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer state is loaded on the GPU
for group in optimizer.param_groups:
UpperCamelCase_ =group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
UpperCamelCase_ =model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
UpperCamelCase_ =group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
UpperCamelCase_ =group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 357 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCamelCase_ ="""bart"""
UpperCamelCase_ =True
@st.cache(allow_output_mutation=_lowercase )
def a_ ( ):
if LOAD_DENSE_INDEX:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Union[str, Any] = qar_model.eval()
else:
        _UpperCamelCase , _UpperCamelCase = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : List[Any] = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Dict = sas_model.eval()
else:
        _UpperCamelCase , _UpperCamelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowercase )
def a_ ( ):
if LOAD_DENSE_INDEX:
_UpperCamelCase : List[Any] = faiss.StandardGpuResources()
_UpperCamelCase : List[str] = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase : Tuple = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
_UpperCamelCase : Optional[int] = faiss.IndexFlatIP(128 )
_UpperCamelCase : Tuple = faiss.index_cpu_to_gpu(_lowercase , 1 , _lowercase )
wikiaab_gpu_index_flat.add(_lowercase ) # TODO fix for larger GPU
else:
        _UpperCamelCase , _UpperCamelCase = (None, None)
_UpperCamelCase : List[Any] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowercase )
def a_ ( ):
_UpperCamelCase : Optional[Any] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
_UpperCamelCase : Any = elia['''train_eli5''']
_UpperCamelCase : Union[str, Any] = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
_UpperCamelCase : str = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowercase )
return (elia_train, eli5_train_q_index)
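# Illustrative sketch (not executed by the app; variable names hypothetical):
# `faiss.IndexFlatIP` performs exact maximum-inner-product search, so querying it
# with a question embedding returns the closest training questions by dot product.
#
#   import numpy as np
#   query_vec = np.random.rand(1, 128).astype("float32")
#   scores, ids = eli5_train_q_index.search(query_vec, 10)  # top-10 inner products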
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =load_indexes()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =load_models()
UpperCamelCase_ , UpperCamelCase_ =load_train_data()
def a_ ( _lowercase , _lowercase=10 ):
_UpperCamelCase : Any = embed_questions_for_retrieval([question] , _lowercase , _lowercase )
    _UpperCamelCase , _UpperCamelCase = eli5_train_q_index.search(_lowercase , _lowercase )
_UpperCamelCase : Tuple = [elia_train[int(_lowercase )] for i in I[0]]
return nn_examples
def a_ ( _lowercase , _lowercase="wiki40b" , _lowercase="dense" , _lowercase=10 ):
if source == "none":
        _UpperCamelCase , _UpperCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
            _UpperCamelCase , _UpperCamelCase = query_qa_dense_index(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
else:
            _UpperCamelCase , _UpperCamelCase = query_es_index(
_lowercase , _lowercase , index_name='''english_wiki40b_snippets_100w''' , n_results=_lowercase , )
_UpperCamelCase : Any = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : List[Any] = '''question: {} context: {}'''.format(_lowercase , _lowercase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowercase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowercase : None),
} )
def a_ ( _lowercase , _lowercase , _lowercase , _lowercase=64 , _lowercase=256 , _lowercase=False , _lowercase=2 , _lowercase=0.95 , _lowercase=0.8 ):
with torch.no_grad():
_UpperCamelCase : List[Any] = qa_sas_generate(
_lowercase , _lowercase , _lowercase , num_answers=1 , num_beams=_lowercase , min_len=_lowercase , max_len=_lowercase , do_sample=_lowercase , temp=_lowercase , top_p=_lowercase , top_k=_lowercase , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
UpperCamelCase_ ="""<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
UpperCamelCase_ ="""
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCamelCase_ ="""
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages for the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCamelCase_ =[
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
UpperCamelCase_ =st.sidebar.checkbox("""Demo options""")
if demo_options:
UpperCamelCase_ =st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
UpperCamelCase_ =action_list.index(action_st)
UpperCamelCase_ =st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
UpperCamelCase_ =show_type == """Show full text of passages"""
else:
UpperCamelCase_ =3
UpperCamelCase_ =True
UpperCamelCase_ =st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
UpperCamelCase_ ="""
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
UpperCamelCase_ =st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
UpperCamelCase_ =st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
UpperCamelCase_ ="""wiki40b"""
UpperCamelCase_ ="""dense"""
UpperCamelCase_ ="""beam"""
UpperCamelCase_ =2
UpperCamelCase_ =64
UpperCamelCase_ =256
UpperCamelCase_ =None
UpperCamelCase_ =None
UpperCamelCase_ =st.sidebar.checkbox("""Generation options""")
if generate_options:
UpperCamelCase_ ="""
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can generate an answer with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
UpperCamelCase_ =st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
UpperCamelCase_ =st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCamelCase_ =st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCamelCase_ =st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCamelCase_ =st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCamelCase_ =st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCamelCase_ =None
# start main text
UpperCamelCase_ =[
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
UpperCamelCase_ =st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCamelCase_ =st.text_input("""Enter your question here:""", """""")
else:
UpperCamelCase_ =question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method="""dense""", n_results=10)
UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method="""sparse""", n_results=10)
UpperCamelCase_ =[]
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCamelCase_ =support_list[:10]
UpperCamelCase_ ="""<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCamelCase_ , UpperCamelCase_ =answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
UpperCamelCase_ ="""https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
UpperCamelCase_ =res[1].strip()
if sec_titles == "":
UpperCamelCase_ ="""[{}]({})""".format(res[0], wiki_url)
else:
UpperCamelCase_ =sec_titles.split(""" & """)
UpperCamelCase_ =""" & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
UpperCamelCase_ =find_nearest_training(question)
UpperCamelCase_ =nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
UpperCamelCase_ =[
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
UpperCamelCase_ ="""
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 128 | 0 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> str:
# Initialise PyTorch model
_snake_case = TaConfig.from_json_file(UpperCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
_snake_case = TaForConditionalGeneration(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(UpperCamelCase__ )
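# Example invocation (script name and paths illustrative):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model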
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 42 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
__A =namedtuple('covid_data', 'cases deaths recovered')
def _UpperCamelCase ( UpperCamelCase__ = "https://www.worldometers.info/coronavirus/" ):
UpperCAmelCase__ : Union[str, Any] = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(UpperCamelCase__ ).content ).xpath(UpperCamelCase__ ) )
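# Minimal offline sketch of the XPath above (markup illustrative): each headline
# counter sits in a `<div class="maincounter-number"><span>...</span></div>` block,
# so the query yields (cases, deaths, recovered) in page order.
#
#   snippet = '<div class="maincounter-number"><span>42</span></div>'
#   html.fromstring(snippet).xpath('//div[@class = "maincounter-number"]/span/text()')
#   # -> ['42']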
__A ='Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 163 | 0 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def _lowercase ( __lowerCAmelCase ) -> str:
def wrapper(*__lowerCAmelCase , **__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : str = timeit.default_timer()
SCREAMING_SNAKE_CASE__ : List[Any] = func(*__lowerCAmelCase , **__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = timeit.default_timer() - starttime
return delta
SCREAMING_SNAKE_CASE__ : Optional[int] = func.__name__
return wrapper
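# Illustrative usage (function name hypothetical): the wrapper above returns the
# elapsed time in seconds and deliberately discards the wrapped function's result.
#
#   @_lowercase
#   def write_batch():
#       ...
#   seconds = write_batch()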
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=100 , __lowerCAmelCase=None ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : Dict = seq_shapes or {}
for i in range(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__lowerCAmelCase , _ArrayXD ):
SCREAMING_SNAKE_CASE__ : Dict = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
SCREAMING_SNAKE_CASE__ : Optional[int] = """The small grey turtle was surprisingly fast when challenged."""
else:
SCREAMING_SNAKE_CASE__ : List[str] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__lowerCAmelCase , datasets.Sequence ):
while isinstance(__lowerCAmelCase , datasets.Sequence ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = v.feature
SCREAMING_SNAKE_CASE__ : str = seq_shapes[k]
SCREAMING_SNAKE_CASE__ : List[str] = np.random.rand(*__lowerCAmelCase ).astype(v.dtype )
SCREAMING_SNAKE_CASE__ : Any = data
dummy_data.append((i, example) )
return dummy_data
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=100 , __lowerCAmelCase=None ) -> str:
SCREAMING_SNAKE_CASE__ : Tuple = generate_examples(__lowerCAmelCase , num_examples=__lowerCAmelCase , seq_shapes=__lowerCAmelCase )
with ArrowWriter(features=__lowerCAmelCase , path=__lowerCAmelCase ) as writer:
for key, record in dummy_data:
SCREAMING_SNAKE_CASE__ : List[str] = features.encode_example(__lowerCAmelCase )
writer.write(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
    if num_final_examples != num_examples:
raise ValueError(
F'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.Dataset.from_file(filename=__lowerCAmelCase , info=datasets.DatasetInfo(features=__lowerCAmelCase ) )
return dataset
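# Usage sketch (values illustrative; assumes the last helper keeps its upstream
# name `generate_example_dataset`): write 100 random string examples to an Arrow
# file and load them back as a `datasets.Dataset`.
#
#   feats = datasets.Features({"text": datasets.Value("string")})
#   ds = generate_example_dataset("/tmp/bench.arrow", feats, num_examples=100)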
| 56 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=False , _a=True , _a="None" , _a=3 , _a=4 , _a=None , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = seq_length
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : str = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = num_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = num_choices
SCREAMING_SNAKE_CASE__ : List[str] = relative_attention
SCREAMING_SNAKE_CASE__ : str = position_biased_input
SCREAMING_SNAKE_CASE__ : List[str] = pos_att_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scope
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ) -> Tuple:
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
SCREAMING_SNAKE_CASE__ : Any = 300
return config
def _a ( self , _a ) -> List[str]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DebertaModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(_a , attention_mask=_a , token_type_ids=_a )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , token_type_ids=_a )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = DebertaForMaskedLM(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Tuple = DebertaForSequenceClassification(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_a )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = DebertaForTokenClassification(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = DebertaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE__ : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[str] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE :str = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE :Union[str, Any] = True
_SCREAMING_SNAKE_CASE :str = False
_SCREAMING_SNAKE_CASE :Dict = False
_SCREAMING_SNAKE_CASE :Dict = False
_SCREAMING_SNAKE_CASE :Union[str, Any] = False
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = DebertaModelTester(self )
SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=_a , hidden_size=37 )
def _a ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_a )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_a )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_a )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_a )
@slow
def _a ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Dict = DebertaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __a (unittest.TestCase):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def _a ( self ) -> Any:
"""simple docstring"""
pass
@slow
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a , attention_mask=_a )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 56 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {"""vocab_file""": """spiece.model"""}
_lowerCamelCase : Tuple = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
_lowerCamelCase : int = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
_lowerCamelCase : List[Any] = """▁"""
class lowercase ( UpperCamelCase__):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=True , _lowerCamelCase : int=True , _lowerCamelCase : List[Any]=False , _lowerCamelCase : Union[str, Any]="[CLS]" , _lowerCamelCase : str="[SEP]" , _lowerCamelCase : List[Any]="<unk>" , _lowerCamelCase : Optional[Any]="[SEP]" , _lowerCamelCase : List[str]="<pad>" , _lowerCamelCase : Optional[Any]="[CLS]" , _lowerCamelCase : List[Any]="[MASK]" , _lowerCamelCase : Optional[int] = None , **_lowerCamelCase : Any , ):
"""simple docstring"""
A_ : List[str] = (
AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ , normalized=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
else mask_token
)
A_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
A_ : Tuple = do_lower_case
A_ : int = remove_space
A_ : List[Any] = keep_accents
A_ : List[Any] = vocab_file
A_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
@property
def a_ ( self : Optional[int] ):
"""simple docstring"""
return len(self.sp_model )
def a_ ( self : Optional[int] ):
"""simple docstring"""
A_ : Tuple = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
"""simple docstring"""
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Optional[int] = None
return state
def __setstate__( self : Dict , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : Optional[int] = {}
A_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : List[Any] , _lowerCamelCase : List[str] ):
"""simple docstring"""
if self.remove_space:
A_ : Dict = " ".join(inputs.strip().split() )
else:
A_ : List[Any] = inputs
A_ : str = outputs.replace('''``''' , '''\"''' ).replace('''\'\'''' , '''\"''' )
if not self.keep_accents:
A_ : List[Any] = unicodedata.normalize('''NFKD''' , lowerCAmelCase__ )
A_ : Any = "".join([c for c in outputs if not unicodedata.combining(lowerCAmelCase__ )] )
if self.do_lower_case:
A_ : int = outputs.lower()
return outputs
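    # Behavior sketch with the default flags (do_lower_case=True, remove_space=True,
    # keep_accents=False); input illustrative, method known upstream as `preprocess_text`:
    #   "  Héllo ``world''  "  ->  'hello "world"'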
def a_ ( self : Optional[int] , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.preprocess_text(lowerCAmelCase__ )
A_ : Optional[Any] = self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
A_ : Optional[int] = []
for piece in pieces:
if len(lowerCAmelCase__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
A_ : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase__ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A_ : Optional[Any] = cur_pieces[1:]
else:
A_ : List[str] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCAmelCase__ )
else:
new_pieces.append(lowerCAmelCase__ )
return new_pieces
def a_ ( self : str , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.sp_model.PieceToId(lowerCAmelCase__ )
def a_ ( self : Dict , _lowerCamelCase : Any ):
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCAmelCase__ )
def a_ ( self : Tuple , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : Dict = []
A_ : Tuple = ""
A_ : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
A_ : str = True
A_ : Tuple = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
A_ : List[Any] = False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def a_ ( self : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] = None ):
"""simple docstring"""
A_ : Union[str, Any] = [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a_ ( self : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] = None , _lowerCamelCase : int = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def a_ ( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int = None ):
"""simple docstring"""
A_ : Tuple = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a_ ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : int = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A_ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , '''wb''' ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
| 167 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , ) -> Optional[int]:
'''simple docstring'''
a__ : str =size if size is not None else {"shortest_edge": 2_0}
a__ : Union[str, Any] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
a__ : Tuple =parent
a__ : Optional[int] =batch_size
a__ : Any =num_channels
a__ : List[str] =image_size
a__ : Dict =min_resolution
a__ : List[Any] =max_resolution
a__ : Dict =do_resize
a__ : Union[str, Any] =size
a__ : str =do_center_crop
a__ : List[str] =crop_size
def _lowercase ( self ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] =MobileNetVaImageProcessingTester(self )
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "crop_size" ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Any =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
a__ : Dict =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : List[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Dict =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : str =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 95 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def __lowerCamelCase ( __a :List[str] , __a :Optional[int] ) -> List[Any]:
"""simple docstring"""
A__ = iter(A_ )
while True:
A__ = tuple(itertools.islice(A_ , A_ ) )
if not chunk:
return
yield chunk
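# Behavior sketch (inputs illustrative): chunker("ABCDE", 2) yields the tuples
# ("A", "B"), ("C", "D"), ("E",); the final chunk may be shorter than `size`.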
def __lowerCamelCase ( __a :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
A__ = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
A__ = ''''''
if len(A_ ) < 2:
return dirty
for i in range(len(A_ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(A_ ) & 1:
clean += "X"
return clean
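# Worked example (input illustrative): prepare_input("Hide the gold in the tree stump")
# -> "HIDETHEGOLDINTHETREXESTUMP": non-letters are dropped, the doubled "EE" in "tree"
# is split with an "X", and an "X" is appended if the length would be odd.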
def __lowerCamelCase ( __a :Optional[Any] ) -> List[Any]:
"""simple docstring"""
A__ = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
A__ = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(A_ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(A_ )
return table
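# Example (key illustrative): generate_table("marvin") places the deduplicated key
# letters first, then the rest of the 25-letter alphabet (which omits "J"):
#   ['M', 'A', 'R', 'V', 'I', 'N', 'B', 'C', 'D', 'E', ...]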
def __lowerCamelCase ( __a :int , __a :Any ) -> str:
"""simple docstring"""
A__ = generate_table(A_ )
A__ = prepare_input(A_ )
A__ = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(A_ , 2 ):
        rowa , cola = divmod(table.index(chara) , 5 )
        rowb , colb = divmod(table.index(charb) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
return ciphertext
def __lowerCamelCase ( __a :Dict , __a :int ) -> List[str]:
"""simple docstring"""
A__ = generate_table(A_ )
A__ = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(A_ , 2 ):
        rowa , cola = divmod(table.index(chara) , 5 )
        rowb , colb = divmod(table.index(charb) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
return plaintext
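# Round-trip sketch (the classic Wikipedia Playfair example):
#   encode("Hide the gold in the tree stump", "playfair example")
#   is expected to yield "BMODZBXDNABEKUDMUIXMMOUVIF", and decoding that
#   ciphertext recovers the prepared text "HIDETHEGOLDINTHETREXESTUMP".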
| 358 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class A :
'''simple docstring'''
__lowerCamelCase : Optional[Any] = BlenderbotSmallConfig
__lowerCamelCase : Optional[Any] = {}
__lowerCamelCase : List[Any] = '''gelu'''
def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Union[str, Any]=99 , __lowerCAmelCase : Union[str, Any]=32 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : List[str]=20 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : int=0 , ) -> Any:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = eos_token_id
A__ = pad_token_id
A__ = bos_token_id
def a_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_blenderbot_small_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
def a_ ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
A__ = TFBlenderbotSmallModel(config=__lowerCAmelCase ).get_decoder()
A__ = inputs_dict["""input_ids"""]
A__ = input_ids[:1, :]
A__ = inputs_dict["""attention_mask"""][:1, :]
A__ = inputs_dict["""head_mask"""]
A__ = 1
# first forward pass
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , head_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
A__ , A__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention mask
A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ = output_from_no_past[:, -3:, random_slice_idx]
A__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 )
def __lowerCamelCase ( __a :Dict , __a :Tuple , __a :List[Any] , __a :List[str]=None , __a :List[Any]=None , __a :Optional[Any]=None , __a :List[str]=None , __a :int=None , ) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
A__ = tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__lowerCamelCase : List[Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase : Tuple = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase : Dict = True
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Tuple = False
def a_ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
A__ = TFBlenderbotSmallModelTester(self )
A__ = ConfigTester(self , config_class=__lowerCAmelCase )
def a_ ( self : List[str] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : List[str] ) -> Any:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCAmelCase )
@require_tokenizers
@require_tf
class A (unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[str] = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
__lowerCamelCase : Optional[int] = '''facebook/blenderbot_small-90M'''
@cached_property
def a_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
def a_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def a_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
A__ = self.tokenizer(self.src_text , return_tensors="""tf""" )
A__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCAmelCase , )
A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCAmelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 276 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
_UpperCAmelCase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
    # used during training (even though we don't have a training script for these models yet)
    """Mask2FormerConfig""": ["""ignore_value"""],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
    # `norm` used in the conversion script (despite not being used in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the modeling source strings."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
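
# Illustrative check (added sketch, not part of the original script): the regex
# branch above exists to catch `getattr` calls wrapped across several lines,
# which the plain substring tests on `modeling_source` would miss, e.g.
#
#     value = getattr(
#         self.config, "chunk_length", None
#     )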
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `config_class` constructor attributes unused in the modeling files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Raise if any configuration class has constructor arguments that are unused in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
    check_config_attributes()
| 140 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    """Apply each (tf_name, hf_name) substitution in `patterns` to the key `k`, in order."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
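
# Worked example (illustrative): with DECODER_PATTERNS, the INIT_COMMON pairs rewrite
# separators and base names first, then the decoder-specific pairs apply:
#
#     rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#     # -> "model.decoder.layers.0.self_attn.q_proj.weight"
#
# Pattern order matters: "attention.self.LayerNorm" is listed before "attention.self"
# so the longer pattern is rewritten first.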
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ("dense", "query", "key", "value")):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ("dense", "query", "key", "value")):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # the single TF position embedding is shared between the encoder and the decoder
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
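    # Illustrative invocation (paths are placeholders, not from the original script):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py \
    #       --tf_ckpt_path /path/to/bigbird_pegasus_ckpt --save_dir ./bigbird_pegasus_hf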
| 140 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
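
# Illustrative usage (added sketch): `from_pretrained` above can pull just the
# nested vision section out of a composite BLIP-2 checkpoint, e.g.
#     vision_config = Blip2VisionConfig.from_pretrained("Salesforce/blip2-opt-2.7b")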
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
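
# Minimal composition sketch (illustrative; `OPTConfig` would need to be imported
# by the caller):
#     config = Blip2Config.from_vision_qformer_text_configs(
#         Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#     )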
| 359 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
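
    # Illustrative exchange (added sketch, not part of the original module): both
    # parties derive the same shared secret from each other's public keys.
    alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )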
| 107 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
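
# Minimal usage sketch (illustrative; the checkpoint name is an example):
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     encoding = processor(images=image, return_tensors="pt")
# With the default `apply_ocr=True`, words and boxes come from the image
# processor's OCR step, so none are passed in explicitly.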
| 63 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Apply the element-wise logistic sigmoid 1 / (1 + e^(-x)).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 282 | 0
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
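
# Illustrative note (not in the original init): with the lazy module installed,
# `from transformers.models.ernie import ErnieModel` defers the torch-backed
# submodule import until the attribute is first accessed.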
| 297 |
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 297 | 1 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indentation prefix of `line` (empty string for blank lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given `indent_level`, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap `key` so objects compare case-insensitively and without underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort (constants, classes, functions)."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
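
# Illustrative ordering (added sketch): constants sort first, then classes, then
# functions, each group compared case-insensitively and without underscores:
#     sort_objects(["foo", "Bar", "BAZ"])  # -> ["BAZ", "Bar", "foo"]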
def sort_objects_in_import(import_statement):
    """Sort the imports inside a single import statement of an init file."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
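
# Illustrative one-line case (added sketch): only the bracketed name list is
# reordered, the key is left untouched:
#     sort_objects_in_import('_import_structure["models"] = ["ZModel", "AConfig"]')
#     # -> '_import_structure["models"] = ["AConfig", "ZModel"]'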
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of a given init file; return True if it would change."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS; raise if any would change."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # collect every failing init (the original overwrote the list on each hit)
                failures.append(os.path.join(root, "__init__.py"))

    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only) | 96 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 313 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
    def test_top_k_top_p_filtering( self ):
        '''simple docstring'''
        logits = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float('''inf''' )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1E-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
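# A minimal NumPy sketch (added for illustration, not part of the test suite)
# of the top-k half of the filtering exercised above: every logit strictly
# below the k-th largest value in its row is pushed to -inf. The real
# `tf_top_k_top_p_filtering` additionally applies nucleus (top-p) filtering.
def _top_k_filter_sketch(logits, k):
    kth_largest = np.sort(logits, axis=-1)[..., -k, None]  # per-row threshold
    return np.where(logits < kth_largest, -np.inf, logits)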
@require_tf
class _lowercase ( unittest.TestCase , GenerationIntegrationTestsMixin ):
"""simple docstring"""
if is_tf_available():
        framework_dependent_parameters = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
    def test_generate_tf_function_export_fixed_input_length( self ):
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        input_length = 2
        max_new_tokens = 2
        class DummyModel ( tf.Module ):
            """simple docstring"""
            def __init__( self , model ):
                '''simple docstring'''
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
                    tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ):
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'''serving_default''': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['''serving_default''']
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
                    '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )['''sequences''']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
    def test_generate_tf_function_export_fixed_batch_size( self ):
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        batch_size = 1
        max_new_tokens = 2
        class DummyModel ( tf.Module ):
            """simple docstring"""
            def __init__( self , model ):
                '''simple docstring'''
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ):
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'''serving_default''': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['''serving_default''']
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    '''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
                    '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )['''sequences''']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=tmp_dir )
            class CompleteSentenceTransformer ( tf.keras.layers.Layer ):
                """simple docstring"""
                def __init__( self ):
                    '''simple docstring'''
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , '''spiece.model''' ) , '''rb''' ).read() )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
                def call( self , inputs , *args , **kwargs ):
                    '''simple docstring'''
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids , attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )
    def test_eos_token_id_int_and_list_top_k_top_sampling( self ):
        '''simple docstring'''
        generation_kwargs = {
            '''do_sample''': True,
            '''num_beams''': 1,
            '''top_p''': 0.7,
            '''top_k''': 10,
            '''temperature''': 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        sentence = '''Hello, my dog is cute and'''
        tokens = tokenizer(sentence , return_tensors='''tf''' )
        model = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [638, 198]
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
    def test_model_kwarg_encoder_signature_filtering( self ):
        '''simple docstring'''
        bart_tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        article = '''Hugging Face is a technology company based in New York and Paris.'''
        input_ids = bart_tokenizer(article , return_tensors='''tf''' ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        output = bart_model.generate(input_ids ).numpy()
        class FakeBart ( TFBartForConditionalGeneration ):
            """simple docstring"""
            def call( self , input_ids , foo=None , **kwargs ):
                '''simple docstring'''
                return super().call(input_ids , **kwargs )
        bart_model = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        fake_output = bart_model.generate(input_ids , foo='''bar''' ).numpy()
        self.assertTrue(np.array_equal(output , fake_output ) )
        class FakeEncoder ( bart_model.model.encoder.__class__ ):
            """simple docstring"""
            def call( self , input_ids , **kwargs ):
                '''simple docstring'''
                return super().call(input_ids , **kwargs )
        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo='''bar''' )
| 85 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''albert'''
    def __init__( self , vocab_size=30000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig ( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 85 | 1 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config ( model_name , num_frames ):
    """simple docstring"""
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find('patch' )
    patch_size = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3_072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1_024
        vision_config.intermediate_size = 4_096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3_072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key ( name ):
    """simple docstring"""
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
    if name == "positional_embedding":
        name = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if name.startswith('transformer.resblocks' ):
        name = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace('attn.out_proj' , 'self_attn.out_proj' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'text_model.final_layer_norm' )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
    if name == "visual.positional_embedding":
        name = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
    if name.startswith('visual.transformer.resblocks' ):
        name = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
    if "visual.conv1" in name:
        name = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
    if "visual.ln_pre" in name:
        name = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
    if "visual.ln_post" in name:
        name = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
    if "visual.proj" in name:
        name = name.replace('visual.proj' , 'visual_projection.weight' )
    if "text_projection" in name:
        name = name.replace('text_projection' , 'text_projection.weight' )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
    if "prompts_visual_ln" in name:
        name = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace('positional' , 'position' )
    if name.startswith('mit.resblocks' ):
        name = name.replace('mit.resblocks' , 'mit.encoder.layers' )
    # prompts generator
    if name.startswith('prompts_generator.norm' ):
        name = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
    return name
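# Illustrative spot-checks (added, not part of the original script):
# rename_key("visual.conv1.weight") -> "vision_model.embeddings.patch_embedding.weight"
# rename_key("ln_final.weight")     -> "text_model.final_layer_norm.weight"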
def convert_state_dict ( orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split('.' )
            if key.startswith('visual' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCamelCase = val[
:dim, :
]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[
-dim:, :
]
else:
UpperCamelCase = val[
:dim
]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[
-dim:
]
else:
if "weight" in key:
UpperCamelCase = val[
:dim, :
]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[
-dim:, :
]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[-dim:]
        elif key.startswith('mit' ):
            layer_num = key_split[2]
            dim = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
        else:
            layer_num = key_split[2]
            dim = config.text_config.hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[-dim:]
        else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video ( num_frames ):
    """simple docstring"""
    if num_frames == 8:
        filename = 'eating_spaghetti_8_frames.npy'
    elif num_frames == 16:
        filename = 'eating_spaghetti.npy'
    elif num_frames == 32:
        filename = 'eating_spaghetti_32_frames.npy'
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename=filename , repo_type='dataset' , )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint ( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    """simple docstring"""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name , num_frames )
    model = XCLIPModel(config )
    model.eval()
    if "drive" in checkpoint_url:
        output = 'pytorch_model.bin'
        gdown.cached_download(checkpoint_url , output , quiet=False )
        state_dict = torch.load(output , map_location='cpu' )['model']
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )['model']
    state_dict = convert_state_dict(state_dict , config )
    model = XCLIPModel(config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    size = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
    image_processor = VideoMAEImageProcessor(size=size )
    slow_tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
    processor = XCLIPProcessor(image_processor=image_processor , tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=video , return_tensors='pt' , padding=True )
    print('Shape of pixel values:' , inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print('Probs:' , probs )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCamelCase = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCamelCase = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
UpperCamelCase = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCamelCase = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
UpperCamelCase = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCamelCase = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCamelCase = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCamelCase = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCamelCase = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCamelCase = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCamelCase = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCamelCase = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCamelCase = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCamelCase = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCamelCase = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCamelCase = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCamelCase = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCamelCase = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(A__ , A__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(A__ , organization='nielsr' )
processor.push_to_hub(A__ , organization='nielsr' )
slow_tokenizer.push_to_hub(A__ , organization='nielsr' )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance ( emb_1 , emb_2 , eps=1e-1_2 ):
    """simple docstring"""
    norm_emb_1 = jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1 ) , a_min=eps ) ).T
    norm_emb_2 = jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T )
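# Usage sketch (illustrative): rows are L2-normalized first, so for an
# embedding matrix `emb` of shape (n, d), jax_cosine_distance(emb, emb)
# yields an (n, n) cosine-similarity matrix with ~1.0 on the diagonal.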
class FlaxStableDiffusionSafetyCheckerModule ( nn.Module ):
    """simple docstring"""
    config : CLIPConfig
    dtype : jnp.dtype = jnp.floataa
    def setup( self ):
        """simple docstring"""
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config )
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )
        self.concept_embeds = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
        self.special_care_embeds = self.param(
            'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        self.concept_embeds_weights = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) )
        self.special_care_embeds_weights = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )
    def __call__( self , clip_input ):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input )[1]
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.0_1
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker ( FlaxPreTrainedModel ):
    """simple docstring"""
    config_class = CLIPConfig
    main_input_name = """clip_input"""
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self , config : CLIPConfig , input_shape : Optional[Tuple] = None , seed : int = 0 , dtype : jnp.dtype = jnp.floataa , _do_init : bool = True , **kwargs , ):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 2_2_4, 2_2_4, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )
    def init_weights( self , rng : jax.random.KeyArray , input_shape : Tuple , params : FrozenDict = None ):
        """simple docstring"""
        clip_input = jax.random.normal(rng , input_shape )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        random_params = self.module.init(rngs , clip_input )['params']
        return random_params
    def __call__( self , clip_input , params : dict = None , ):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
        return self.module.apply(
            {'params': params or self.params} , jnp.array(clip_input , dtype=jnp.floataa ) , rngs={} , )
| 28 | 1 |
from __future__ import annotations
from math import pi
def _SCREAMING_SNAKE_CASE ( inductance :float , frequency :float , reactance :float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 232 |
from math import isqrt
def calculate_prime_numbers ( max_number :int ) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution ( max_number :int = 10**8 ) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
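# Sanity check (added for illustration): solution(30) returns 10, the number
# of semiprimes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.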
if __name__ == "__main__":
print(f'''{solution() = }''') | 232 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig ( PretrainedConfig ):
    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , backbone_config=None , num_queries=9_00 , max_position_embeddings=20_48 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=10_24 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=3_00 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
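# Minimal usage sketch (added for illustration): the default config falls
# back to a ResNet backbone, and to_dict() inlines it for JSON round-trips.
# cfg = DetaConfig()
# cfg.to_dict()["backbone_config"]["model_type"]  # -> "resnet"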
| 80 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 1_0
NUM_PERM = 2_5_6
def get_min_hash ( tokens : List[str] ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
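# Illustrative note: for two token lists that largely overlap, the MinHash
# estimate min_hash_1.jaccard(min_hash_2) approximates the exact token-set
# Jaccard similarity computed by jaccard_similarity further below.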
def get_tokens ( code : str ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    '''simple docstring'''
    def __init__( self , * , duplication_jaccard_threshold : float = 0.85 ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key : Tuple , min_hash : MinHash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , '''w''' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash ( element ):
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter ( dataset_iterator : Type[Dataset] ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def make_duplicate_clusters ( dataset_iterator : Type[Dataset] , jaccard_threshold : float ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def jaccard_similarity ( code1 : str , code2 : str ) -> float:
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
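# Worked example (added for illustration): get_tokens splits on NON_ALPHA, so
# jaccard_similarity("def f(a): return a + 1", "def f(b): return b + 1")
# compares {def, f, a, return, 1} with {def, f, b, return, 1} and yields
# 4 / 6 ≈ 0.67 -- below the default 0.85 clustering threshold.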
_shared_dataset = None
def _find_cluster_extremes_shared ( cluster , jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['''base_index''']]['''content''']
        for element2 in extremes:
            code2 = _shared_dataset[element2['''base_index''']]['''content''']
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes ( cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset ( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['''base_index''']]['''copies''']
    print(f'''Original dataset size: {len(dataset )}''' )
    print(f'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(f'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(f'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(f'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
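# End-to-end sketch (illustrative; assumes a 🤗 Dataset with a "content"
# column, e.g. loaded with datasets.load_dataset from a JSONL dump):
# ds = load_dataset("json", data_files="code.jsonl", split="train")
# ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)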
| 45 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively (hf_pointer , key , value , full_name , weight_type ):
    """simple docstring"""
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCamelCase__ = value
elif weight_type == "weight_g":
UpperCamelCase__ = value
elif weight_type == "weight_v":
UpperCamelCase__ = value
elif weight_type == "bias":
UpperCamelCase__ = value
elif weight_type == "running_mean":
UpperCamelCase__ = value
elif weight_type == "running_var":
UpperCamelCase__ = value
elif weight_type == "num_batches_tracked":
UpperCamelCase__ = value
elif weight_type == "inv_freq":
UpperCamelCase__ = value
else:
UpperCamelCase__ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights (fairseq_model , hf_model , is_headless ):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "inv_freq" in name:
                        weight_type = """inv_freq"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer (full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint (checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """simple docstring"""
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act="""swish""" )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = """rotary"""
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
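# Example invocation (illustrative script and file names):
# python convert_wav2vec2_conformer_checkpoint.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --pytorch_dump_folder_path ./wav2vec2-conformer \
#     --not_finetuned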
| 87 |
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the conversion factor, relative to the metre
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def _UpperCamelCase (value :float , from_type :str , to_type :str ):
    """simple docstring"""
    from_sanitized = from_type.lower().strip("""s""" )
    to_sanitized = to_type.lower().strip("""s""" )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'from_type' value: {from_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'to_type' value: {to_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
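# Quick sanity checks (added for illustration):
# _UpperCamelCase(4, "kilometer", "meter")  -> 4000.0
# _UpperCamelCase(1, "gigametre", "meter")  -> 1000000000.0
# _UpperCamelCase(3, "meter", "kilometer")  -> 0.003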
if __name__ == "__main__":
from doctest import testmod
testmod()
| 87 | 1 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE_ = re.sub('<n>' , '' , SCREAMING_SNAKE_CASE_ )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_ ) )
| 214 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class CsvConfig (datasets.BuilderConfig ):
__lowerCamelCase : str = ","
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[Union[int, List[int], str]] = "infer"
__lowerCamelCase : Optional[List[str]] = None
__lowerCamelCase : Optional[List[str]] = None
__lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] = None
__lowerCamelCase : Optional[Union[List[int], List[str]]] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : bool = True
__lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] = None
__lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] = None
__lowerCamelCase : Optional[list] = None
__lowerCamelCase : Optional[list] = None
__lowerCamelCase : bool = False
__lowerCamelCase : Optional[Union[int, List[int]]] = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[Union[str, List[str]]] = None
__lowerCamelCase : bool = True
__lowerCamelCase : bool = True
__lowerCamelCase : bool = False
__lowerCamelCase : bool = True
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : str = "."
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : str = '"'
__lowerCamelCase : int = 0
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : bool = True
__lowerCamelCase : bool = True
__lowerCamelCase : int = 0
__lowerCamelCase : bool = True
__lowerCamelCase : bool = False
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : int = 1_0000
__lowerCamelCase : Optional[datasets.Features] = None
__lowerCamelCase : Optional[str] = "strict"
__lowerCamelCase : Literal["error", "warn", "skip"] = "error"
__lowerCamelCase : Optional[str] = None
    def __post_init__( self ):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs( self ):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
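# Usage sketch: this module backs the packaged "csv" builder in `datasets`
# (registration assumed), so end users typically reach it via `load_dataset`:
#   import datasets
#   ds = datasets.load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
# Extra keyword arguments such as `sep` are captured by CsvConfig and forwarded
# to `pd.read_csv` through the `pd_read_csv_kwargs` property above.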
| 214 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
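# Shape sketch for the split above: with dim = 96 (Swin-tiny) the fused qkv
# weight has shape (3 * 96, 96); rows [0:96] become the query projection,
# [96:192] the key, and [192:288] (equivalently [-96:]) the value.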
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
# fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original MaskFormer checkpoint weights into our MaskFormer structure.
    """
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you\'d like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
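# Example invocation (script file name and paths are placeholders):
#   python convert_maskformer_swin_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade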
| 367 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 257 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
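# Usage sketch: the attribute_map above lets generic code read transformer-style
# attribute names from a T5Config instance, e.g.:
#   config = T5Config(d_model=256, num_layers=4)
#   assert config.hidden_size == config.d_model
#   assert config.num_hidden_layers == config.num_layers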
| 314 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
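# Typical use of the decorator under test in real training code (sketch;
# `build_dataloader` and `train_one_epoch` are assumed helpers):
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_function(batch_size):
#       loader = build_dataloader(batch_size)
#       train_one_epoch(loader)
#   training_function()  # halves batch_size on CUDA OOM until training fits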
| 314 | 1 |
"""simple docstring"""
import pytest
UpperCAmelCase ="__dummy_dataset1__"
UpperCAmelCase ="\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 77 |
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in the decimal expansion of num!."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
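# Worked example: factorial(10) = 3628800, and 3+6+2+8+8+0+0 = 27,
# so solution(10) == 27.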
| 77 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
lowercase__ : Optional[Any] = "</w>"
lowercase__ : int = "@@ "
def A_ ( snake_case : Dict ) -> int:
'''simple docstring'''
__UpperCamelCase = set()
__UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCamelCase = char
return pairs
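# Example: get_pairs(("h", "e", "l", "l", "o</w>")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}.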
# Speech2Text2 has no max input length
lowercase__ : Dict = {"facebook/s2t-wav2vec2-large-en-de": 1_0_2_4}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2Tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 328 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database. Caller is responsible for opening and closing the connection."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch

        return written
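# Usage sketch (these classes back the public `datasets` SQL API):
#   from datasets import Dataset
#   ds = Dataset.from_sql("SELECT * FROM data", "sqlite:///path/to.db")
#   ds.to_sql("data_copy", "sqlite:///path/to.db", batch_size=500)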
| 328 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap them if they are out of order for the given direction (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of the given length starting at index low."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort the slice of the given length starting at low; length must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
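# Note: bitonic sort only works when the slice length is a power of two.
# Worked example: bitonic_sort([3, 1, 4, 2], 0, 4, 1) sorts in place to [1, 2, 3, 4].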
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
| 109 | 1 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 250 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline using any `AutoModelForImageClassification`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 268 | 0 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """An implementation of the Monte Carlo method used to find pi."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of function_to_integrate over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator on y = x, whose integral is known in closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Area under the curve y = sqrt(4 - x^2) for x in [0, 2] equals pi."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133 |
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 133 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = (DDPMScheduler,)
    def _snake_case ( self ,**a_ ) -> List[str]:
        config = {
            """num_train_timesteps""": 1_000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**a_ )
        return config
def _snake_case ( self ) -> Any:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a_ )
def _snake_case ( self ) -> Optional[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=a_ ,beta_end=a_ )
def _snake_case ( self ) -> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a_ )
def _snake_case ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a_ )
def _snake_case ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a_ )
def _snake_case ( self ) -> int:
self.check_over_configs(thresholding=a_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a_ ,prediction_type=a_ ,sample_max_value=a_ ,)
def _snake_case ( self ) -> Optional[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def _snake_case ( self ) -> Optional[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=a_ )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**a_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Dict = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**a_ )
_UpperCAmelCase : List[Any] = len(a_ )
_UpperCAmelCase : Optional[Any] = self.dummy_model()
_UpperCAmelCase : Tuple = self.dummy_sample_deter
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
_UpperCAmelCase : List[Any] = model(a_ ,a_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : Union[str, Any] = scheduler.step(a_ ,a_ ,a_ ,generator=a_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCAmelCase : str = pred_prev_sample
_UpperCAmelCase : List[str] = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : Tuple = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCAmelCase : str = scheduler_class(**a_ )
_UpperCAmelCase : Tuple = len(a_ )
_UpperCAmelCase : Any = self.dummy_model()
_UpperCAmelCase : Dict = self.dummy_sample_deter
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
_UpperCAmelCase : Dict = model(a_ ,a_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : Tuple = scheduler.step(a_ ,a_ ,a_ ,generator=a_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCAmelCase : Any = pred_prev_sample
_UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : List[str] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : Any = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**a_ )
_UpperCAmelCase : Tuple = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a_ )
_UpperCAmelCase : str = scheduler.timesteps
for i, timestep in enumerate(a_ ):
if i == len(a_ ) - 1:
_UpperCAmelCase : Any = -1
else:
_UpperCAmelCase : Union[str, Any] = timesteps[i + 1]
_UpperCAmelCase : str = scheduler.previous_timestep(a_ )
_UpperCAmelCase : List[str] = prev_t.item()
self.assertEqual(a_ ,a_ )
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Any = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : Optional[int] = scheduler_class(**a_ )
_UpperCAmelCase : int = [100, 87, 50, 51, 0]
with self.assertRaises(a_ ,msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=a_ )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[Any] = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**a_ )
_UpperCAmelCase : Any = [100, 87, 50, 1, 0]
_UpperCAmelCase : Any = len(a_ )
with self.assertRaises(a_ ,msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=a_ ,timesteps=a_ )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Dict = self.scheduler_classes[0]
_UpperCAmelCase : Tuple = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**a_ )
_UpperCAmelCase : Union[str, Any] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            a_ ,msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" ,):
scheduler.set_timesteps(timesteps=a_ )
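# Minimal usage sketch (not part of the test class): the denoising loop these tests
# exercise, written against the public diffusers API. The random tensor stands in for
# a real model's noise prediction, so the output is meaningless; it only shows the
# call flow of set_timesteps() and step().
if __name__ == "__main__":
    sketch_scheduler = DDPMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
    sketch_scheduler.set_timesteps(num_inference_steps=50)
    sample = torch.randn(1, 3, 8, 8)
    for t in sketch_scheduler.timesteps:
        noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t)
        sample = sketch_scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.shape)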
| 215 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    '''simple docstring'''
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    '''simple docstring'''
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 215 | 1 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """simple docstring"""
    return getitem, k


def _set(k, v):
    """simple docstring"""
    return setitem, k, v


def _del(k):
    """simple docstring"""
    return delitem, k


def _run_operation(obj, fun, *args):
    """simple docstring"""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)

_overwrite_items = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]

_delete_items = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]

_access_absent_items = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' ,(
pytest.param(_add_items ,id='''add items''' ),
pytest.param(_overwrite_items ,id='''overwrite items''' ),
pytest.param(_delete_items ,id='''delete items''' ),
pytest.param(_access_absent_items ,id='''access absent items''' ),
pytest.param(_add_with_resize_up ,id='''add with resize up''' ),
pytest.param(_add_with_resize_down ,id='''add with resize down''' ),
) ,)
def test_hash_map_is_the_same_as_dict(operations):
    """simple docstring"""
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my ,fun ,*args )
        py_res, py_exc = _run_operation(py ,fun ,*args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(py ) == set(my )
        assert len(py ) == len(my )
        assert set(my.items() ) == set(py.items() )
def test_no_new_public_methods():
    """simple docstring"""
    def is_public(name: str ) -> bool:
        return not name.startswith('''_''' )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
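# Illustration (sketch): each scripted operation is just (callable, *args), so the
# same _run_operation helper replays it against a plain dict.
if __name__ == "__main__":
    d = {}
    _run_operation(d, *_set("key_a", "val_a"))
    result, error = _run_operation(d, *_get("key_a"))
    print(result, error)  # -> val_a None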
| 352 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """simple docstring"""
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """simple docstring"""
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image ,lang=lang ,output_type='''dict''' ,config=tesseract_config )
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left ,top ,width ,height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box ,image_width ,image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
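# Worked example (sketch): LayoutLM-style box normalization maps pixel coordinates
# onto a fixed 0-1000 grid so boxes stay comparable across image sizes.
if __name__ == "__main__":
    print(normalize_box([50, 100, 250, 300], 500, 1_000))
    # -> [100, 100, 500, 300]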
| 15 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class lowerCAmelCase__ ( lowercase__ ):
'''simple docstring'''
__UpperCamelCase = """dpr"""
def __init__( self : Dict , lowercase_ : Tuple=30522 , lowercase_ : Optional[int]=768 , lowercase_ : Any=12 , lowercase_ : Optional[Any]=12 , lowercase_ : Dict=3072 , lowercase_ : Optional[int]="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[int]=0.02 , lowercase_ : Tuple=1e-12 , lowercase_ : List[str]=0 , lowercase_ : Optional[int]="absolute" , lowercase_ : int = 0 , **lowercase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=_A , **_A)
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Union[str, Any] = projection_dim
SCREAMING_SNAKE_CASE_ : int = position_embedding_type
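# Usage sketch (assumption: this class mirrors transformers.DPRConfig): projection_dim
# of 0, the default, means the encoder output is used as-is with no projection layer.
# from transformers import DPRConfig
# config = DPRConfig(projection_dim=128)
# print(config.hidden_size)  # -> 768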
| 91 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase :Union[str, Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
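# Background sketch (not part of this module): _LazyModule defers the heavy framework
# imports until attribute access. Plain Python gets the same effect with a PEP 562
# module-level __getattr__, e.g.:
#
#     import importlib
#
#     def __getattr__(name):
#         if name == "VisionEncoderDecoderModel":
#             module = importlib.import_module(".modeling_vision_encoder_decoder", __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
| 331 | 0 |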
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
lowercase__ = """glpn"""
def __init__( self : Any ,lowerCamelCase__ : Optional[int]=3 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : Optional[Any]=[2, 2, 2, 2] ,lowerCamelCase__ : int=[8, 4, 2, 1] ,lowerCamelCase__ : Union[str, Any]=[32, 64, 160, 256] ,lowerCamelCase__ : Dict=[7, 3, 3, 3] ,lowerCamelCase__ : Union[str, Any]=[4, 2, 2, 2] ,lowerCamelCase__ : List[Any]=[1, 2, 5, 8] ,lowerCamelCase__ : Tuple=[4, 4, 4, 4] ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : Optional[int]=0.0_2 ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : str=1E-6 ,lowerCamelCase__ : Optional[Any]=64 ,lowerCamelCase__ : List[str]=10 ,lowerCamelCase__ : Dict=-1 ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = num_encoder_blocks
_UpperCamelCase : Optional[Any] = depths
_UpperCamelCase : List[Any] = sr_ratios
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : Dict = patch_sizes
_UpperCamelCase : str = strides
_UpperCamelCase : Dict = mlp_ratios
_UpperCamelCase : Optional[Any] = num_attention_heads
_UpperCamelCase : List[Any] = hidden_act
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : Optional[Any] = drop_path_rate
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : List[str] = decoder_hidden_size
_UpperCamelCase : Dict = max_depth
_UpperCamelCase : Any = head_in_index
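# Usage sketch (assumption: this mirrors transformers.GLPNConfig): the depth head
# reads a 64-dim decoder state and its predictions are capped by max_depth.
# from transformers import GLPNConfig, GLPNForDepthEstimation
# model = GLPNForDepthEstimation(GLPNConfig(decoder_hidden_size=64, max_depth=10))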
| 367 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowercase__ ( lowercase ):
lowercase__ = """xlm-roberta-xl"""
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=250880 ,lowerCamelCase__ : Tuple=2560 ,lowerCamelCase__ : Union[str, Any]=36 ,lowerCamelCase__ : List[str]=32 ,lowerCamelCase__ : Optional[Any]=10240 ,lowerCamelCase__ : Tuple="gelu" ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : Optional[int]=514 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : Dict=0.0_2 ,lowerCamelCase__ : Any=1E-05 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=0 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=None ,**lowerCamelCase__ : Dict ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : Optional[int] = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : List[Any] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : Optional[int] = layer_norm_eps
_UpperCamelCase : Optional[int] = position_embedding_type
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class lowercase__ ( lowercase ):
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
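# Sketch of what the ONNX axis spec above produces: for every task except
# multiple-choice the exported graph keeps two dynamic axes per input, i.e.
#   {"input_ids": {0: "batch", 1: "sequence"},
#    "attention_mask": {0: "batch", 1: "sequence"}}
# while multiple-choice adds a "choice" axis between batch and sequence.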
| 236 | 0 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n ):
    '''simple docstring'''
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(" " , end="" )
        for _ in range(0 , i + 1 ):  # printing stars
            print("* " , end="" )
        print()


def reverse_floyd(n ):
    '''simple docstring'''
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print("* " , end="" )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(" " , end="" )


def pretty_print(n ):
    '''simple docstring'''
    if n <= 0:
        print(" ... .... nothing printing :(" )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half


if __name__ == "__main__":
    print(R"| /\ | |- | |- |--| |\ /| |-")
    print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 311 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = "T5Config"
def lowercase ( input_ids , pad_token_id , decoder_start_token_id ):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : Dict = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
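# Worked example (sketch) of the right-shift helper above: with pad_token_id=0 and
# decoder_start_token_id=0, the labels [[5, 6, 7, 1]] become decoder inputs
# [[0, 5, 6, 7]]; any -100 (ignored-label marker) is replaced by the pad token.
# print(lowercase(jnp.array([[5, 6, 7, 1]]), pad_token_id=0, decoder_start_token_id=0))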
| 311 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase__ ( datasets.BeamBasedBuilder):
'''simple docstring'''
def _lowerCamelCase ( self :int ) -> Union[str, Any]:
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=_a , )
def _lowerCamelCase ( self :Tuple , a :Optional[Any] , a :int ) -> Union[str, Any]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def _lowerCamelCase ( self :Dict , a :Any , a :str ) -> Tuple:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_a )
class lowerCamelCase__ ( datasets.BeamBasedBuilder):
'''simple docstring'''
def _lowerCamelCase ( self :Union[str, Any] ) -> Dict:
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=_a , )
def _lowerCamelCase ( self :Any , a :Optional[int] , a :Any ) -> List[str]:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def _lowerCamelCase ( self :Optional[Any] , a :int , a :List[str] ) -> Optional[Any]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_a )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def _SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class lowerCamelCase__ ( _a):
'''simple docstring'''
@require_beam
def _lowerCamelCase ( self :Any ) -> str:
__UpperCamelCase : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : Optional[int] = DummyBeamDataset(cache_dir=_a , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_a , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
__UpperCamelCase : Any = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _a )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _a )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_a , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def _lowerCamelCase ( self :int ) -> str:
import apache_beam as beam
__UpperCamelCase : List[Any] = beam.io.parquetio.WriteToParquet
__UpperCamelCase : List[Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : Optional[int] = DummyBeamDataset(cache_dir=_a , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
__UpperCamelCase : int = partial(_a , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_a , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_a , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
__UpperCamelCase : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _a )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _a )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(_a , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def _lowerCamelCase ( self :Union[str, Any] ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : Union[str, Any] = DummyBeamDataset(cache_dir=_a )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def _lowerCamelCase ( self :Optional[Any] ) -> List[Any]:
__UpperCamelCase : Dict = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : Optional[Any] = NestedBeamDataset(cache_dir=_a , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_a , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
__UpperCamelCase : int = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _a )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _a )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_a , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
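# Minimal Beam sketch (assumes apache_beam is installed): the _build_pcollection
# methods above just materialize their example tuples with beam.Create on whatever
# runner the builder was configured with.
# import apache_beam as beam
# with beam.Pipeline(runner="DirectRunner") as pipeline:
#     _ = pipeline | "Load Examples" >> beam.Create(get_test_dummy_examples())
| 370 |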
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase : Any = 16
lowercase : Optional[int] = 32
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Accelerator , _lowerCamelCase : int = 16) -> int:
'''simple docstring'''
__UpperCamelCase : Any = AutoTokenizer.from_pretrained("bert-base-cased")
__UpperCamelCase : Optional[Any] = load_dataset("glue" , "mrpc")
def tokenize_function(_lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCamelCase : Optional[int] = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase : List[str] = tokenized_datasets.rename_column("label" , "labels")
def collate_fn(_lowerCamelCase : Union[str, Any]):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCamelCase : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCamelCase : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
__UpperCamelCase : Dict = 8
else:
__UpperCamelCase : Optional[Any] = None
return tokenizer.pad(
_lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
__UpperCamelCase : Optional[Any] = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase)
__UpperCamelCase : int = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]) -> str:
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCamelCase) == "1":
__UpperCamelCase : List[str] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__UpperCamelCase : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir)
else:
__UpperCamelCase : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase : List[str] = config["lr"]
__UpperCamelCase : Optional[Any] = int(config["num_epochs"])
__UpperCamelCase : List[Any] = int(config["seed"])
__UpperCamelCase : Any = int(config["batch_size"])
set_seed(_lowerCamelCase)
__UpperCamelCase , __UpperCamelCase : List[Any] = get_dataloaders(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : List[str] = evaluate.load("glue" , "mrpc")
# If the batch size is too big we use gradient accumulation
__UpperCamelCase : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__UpperCamelCase : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
__UpperCamelCase : Union[str, Any] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase : str = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCamelCase : Optional[int] = model.to(accelerator.device)
# Instantiate optimizer
__UpperCamelCase : List[str] = AdamW(params=model.parameters() , lr=_lowerCamelCase)
# Instantiate scheduler
__UpperCamelCase : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__UpperCamelCase : Dict = os.path.split(_lowerCamelCase)[-1].split(".")[0]
accelerator.init_trackers(_lowerCamelCase , _lowerCamelCase)
# Now we train the model
for epoch in range(_lowerCamelCase):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__UpperCamelCase : Tuple = 0
for step, batch in enumerate(_lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
__UpperCamelCase : Dict = model(**_lowerCamelCase)
__UpperCamelCase : Any = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__UpperCamelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device)
with torch.no_grad():
__UpperCamelCase : Union[str, Any] = model(**_lowerCamelCase)
__UpperCamelCase : str = outputs.logits.argmax(dim=-1)
__UpperCamelCase , __UpperCamelCase : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
__UpperCamelCase : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , _lowerCamelCase)
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(_lowerCamelCase),
"epoch": epoch,
} , step=_lowerCamelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : str = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU.")
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=_lowerCamelCase , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
__UpperCamelCase : Union[str, Any] = parser.parse_args()
__UpperCamelCase : str = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCamelCase , _lowerCamelCase)
if __name__ == "__main__":
    main()
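# Tracking-only sketch (assumes accelerate is installed): the calls the "New Code"
# comments above introduce, isolated from the training loop. Tracker backends are
# discovered from the environment when log_with="all".
# from accelerate import Accelerator
# accelerator = Accelerator(log_with="all", project_dir="logs")
# accelerator.init_trackers("mrpc_demo", config={"lr": 2e-5})
# accelerator.log({"train_loss": 0.42, "epoch": 0}, step=0)
# accelerator.end_training()
| 151 | 0 |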
'''simple docstring'''
from math import sqrt
def solution(limit : int = 1_0_0_0_0_0_0 ) -> int:
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 34 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            '''Expected the same number of rows for A and B. '''
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            '''Expected the same number of columns for B and C. '''
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )
    return mat_c - mat_b.T @ a_inv @ mat_b
class lowercase_ ( unittest.TestCase ):
    """simple docstring"""

    def test_schur_complement(self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )

    def test_improper_a_b_dimensions(self ) -> None:
        # A has two rows while B has three, so the row-count check must fail.
        a = np.array([[1, 2, 1], [2, 1, 2]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )

    def test_improper_b_c_dimensions(self ) -> None:
        # B has two columns while C has three, so the column-count check must fail.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
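# The identity the first test verifies, stated explicitly: for the block matrix
#     M = [[A, B], [B.T, C]]   with A invertible,
# the Schur complement S = C - B.T @ inv(A) @ B satisfies det(M) = det(A) * det(S).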
| 338 | 0 |
from __future__ import annotations
import time
import numpy as np
lowercase : Optional[Any] = [8, 5, 9, 7]
lowercase : Union[str, Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowercase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __snake_case :
    def __init__( self ,claim_vector ,allocated_resources_table ,maximum_claim_table ,):
        '''simple docstring'''
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation( self ):
        '''simple docstring'''
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def __available_resources( self ):
        '''simple docstring'''
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def __need( self ):
        '''simple docstring'''
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def __need_index_manager( self ):
        '''simple docstring'''
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self ,**kwargs ):
        '''simple docstring'''
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n" )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing." )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print("The process is in a safe state.\n" )
            else:
                print("System in unsafe state. Aborting...\n" )
                break

    def __pretty_data( self ):
        '''simple docstring'''
        print(" " * 9 + "Allocated Resource Table" )
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item ) + 1}"
                + " ".join(f"{it:>8}" for it in item )
                + "\n" )
        print(" " * 9 + "System Resource Table" )
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item ) + 1}"
                + " ".join(f"{it:>8}" for it in item )
                + "\n" )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x ) for x in self.__claim_vector ) )
        print(
            "Initial Available Resources: "
            + " ".join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
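# Worked check (sketch): with claim vector [8, 5, 9, 7] and the allocation table
# defined above, the initially available resources are the claim vector minus the
# column sums of the allocations.
# import numpy as np
# claim = np.array([8, 5, 9, 7])
# alloc = np.array([[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]])
# print(claim - alloc.sum(axis=0))  # -> [1 2 2 2]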
| 359 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase : Tuple = """<<<<<<< This should probably be modified because it mentions: """
lowercase : Any = """=======
>>>>>>>
"""
lowercase : List[str] = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
lowercase : Any = [
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __snake_case ( lowerCAmelCase ):
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
lowercase : str = parser.add_parser(
"""convert""" ,help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" ,)
train_parser.add_argument(
"""--tfds_path""" ,type=snake_case ,required=snake_case ,help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" ,)
train_parser.add_argument(
"""--datasets_directory""" ,type=snake_case ,required=snake_case ,help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=snake_case )
def __init__( self ,snake_case ,snake_case ,*snake_case ):
'''simple docstring'''
lowercase : Optional[Any] = get_logger("""datasets-cli/converting""" )
lowercase : Optional[int] = tfds_path
lowercase : Dict = datasets_directory
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
lowercase : List[str] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase : Tuple = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
lowercase : Optional[int] = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
lowercase : List[Any] = []
lowercase : Optional[int] = []
lowercase : Dict = {}
if os.path.isdir(self._tfds_path ):
lowercase : int = os.listdir(snake_case )
else:
lowercase : List[Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
lowercase : List[Any] = os.path.join(snake_case ,snake_case )
lowercase : List[str] = os.path.join(snake_case ,snake_case )
if not os.path.isfile(snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(snake_case ,encoding="""utf-8""" ) as f:
lowercase : str = f.readlines()
lowercase : Union[str, Any] = []
lowercase : Optional[Any] = False
lowercase : Optional[Any] = False
lowercase : Optional[int] = []
for line in lines:
lowercase : int = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase : Union[str, Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
lowercase : List[Any] = """"""
continue
elif "from absl import logging" in out_line:
lowercase : Optional[int] = """from datasets import logging\n"""
elif "getLogger" in out_line:
lowercase : Any = out_line.replace("""getLogger""" ,"""get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase : Optional[Any] = True
lowercase : Optional[Any] = list(filter(lambda snake_case : e in out_line ,snake_case ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(snake_case ) + """\n""" )
out_lines.append(snake_case )
out_lines.append(snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase : Union[str, Any] = re.sub(snake_case ,snake_case ,snake_case )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" ,snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
lowercase : Optional[int] = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase : Any = True
out_lines.append(snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase : Union[str, Any] = f_name.replace(""".py""" ,"""""" )
lowercase : Optional[Any] = os.path.join(snake_case ,snake_case )
lowercase : List[str] = os.path.join(snake_case ,snake_case )
os.makedirs(snake_case ,exist_ok=snake_case )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(snake_case )
if needs_manual_update:
with_manual_update.append(snake_case )
with open(snake_case ,"""w""" ,encoding="""utf-8""" ) as f:
f.writelines(snake_case )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
lowercase : Optional[int] = os.path.basename(snake_case )
lowercase : int = imports_to_builder_map[f_name.replace(""".py""" ,"""""" )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(snake_case ,snake_case )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 285 | 0 |
'''simple docstring'''
lowercase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowercase_ = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 211 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __A :
'''simple docstring'''
    common : CommonSchedulerState
    # setable values
    init_noise_sigma : jnp.ndarray
    timesteps : jnp.ndarray
    num_inference_steps : Optional[int] = None

    @classmethod
    def create(cls , common , init_noise_sigma , timesteps ):
        """simple docstring"""
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class __A ( A ):
'''simple docstring'''
    state : DDPMSchedulerState
class __A ( A , A ):
'''simple docstring'''
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype : jnp.dtype
    @property
    def has_state(self ) -> bool:
        """simple docstring"""
        return True
@register_to_config
    def __init__(self , num_train_timesteps = 1_000 , beta_start = 0.0001 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , variance_type = "fixed_small" , clip_sample = True , prediction_type = "epsilon" , dtype = jnp.floataa , ):
        """simple docstring"""
        self.dtype = dtype

    def create_state(self , common = None ) -> DDPMSchedulerState:
        """simple docstring"""
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )

    def scale_model_input(self , state , sample , timestep = None ) -> jnp.ndarray:
        """simple docstring"""
        return sample

    def set_timesteps(self , state , num_inference_steps , shape = () ) -> DDPMSchedulerState:
        """simple docstring"""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
def a__ (self , A , A , A=None , A=None ) -> int:
"""simple docstring"""
_a = state.common.alphas_cumprod[t]
_a = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_a = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_a = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_a = jnp.clip(A , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_a = jnp.log(jnp.clip(A , a_min=1E-20 ) )
elif variance_type == "fixed_large":
_a = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_a = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_a = variance
_a = state.common.betas[t]
_a = (predicted_variance + 1) / 2
_a = frac * max_log + (1 - frac) * min_log
return variance
def a__ (self , A , A , A , A , A = None , A = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
_a = timestep
if key is None:
_a = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_a , _a = jnp.split(A , sample.shape[1] , axis=1 )
else:
_a = None
# 1. compute alphas, betas
_a = state.common.alphas_cumprod[t]
_a = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_a = 1 - alpha_prod_t
_a = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_a = model_output
elif self.config.prediction_type == "v_prediction":
_a = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_a = jnp.clip(A , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_a = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_a = jax.random.split(A , num=1 )
_a = jax.random.normal(A , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(A , A , predicted_variance=A ) ** 0.5) * noise
_a = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_a = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=A , state=A )
def a__ (self , A , A , A , A , ) -> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , A , A , A )
def a__ (self , A , A , A , A , ) -> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , A , A , A )
def __len__(self ) -> Tuple:
"""simple docstring"""
return self.config.num_train_timesteps
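# --- Illustrative sketch (not part of the scheduler above): the posterior
# mean computed in step 5 of `step()` is formula (7) of the DDPM paper,
#   mu_t = coeff_x0 * x_0_pred + coeff_xt * x_t.
# A plain-Python scalar version, with made-up inputs, for intuition:
def ddpm_posterior_mean(
    sample: float, pred_x0: float, alpha_prod_t: float, alpha_prod_t_prev: float, alpha_t: float
) -> float:
    beta_t = 1 - alpha_t
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    coeff_x0 = (alpha_prod_t_prev**0.5 * beta_t) / beta_prod_t  # weight on predicted x_0
    coeff_xt = alpha_t**0.5 * beta_prod_t_prev / beta_prod_t  # weight on current sample x_t
    return coeff_x0 * pred_x0 + coeff_xt * sample


# Hypothetical scalars (alpha_prod_t = alpha_t * alpha_prod_t_prev must hold):
# ddpm_posterior_mean(sample=0.5, pred_x0=0.4, alpha_prod_t=0.9, alpha_prod_t_prev=0.909, alpha_t=0.99)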
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A weighted undirected graph, used to build a minimum spanning tree."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # Grow a subgraph from an arbitrary vertex, repeatedly adding the
        # cheapest edge that connects the subgraph to a new vertex.
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """
    Return the maximum saving achievable by replacing the network described in
    `filename` with its minimum spanning tree (Project Euler problem 107).
    """
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")
    adjacency_matrix: list[list[str]] = [line.split(",") for line in data]
    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
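# --- Worked example (added for illustration, not in the original solution):
# Prim's algorithm on a hand-built triangle graph. The MST keeps the two light
# edges, so the saving over the full network is the weight of the dropped edge.
def _demo_prims() -> None:
    toy_graph = Graph({0, 1, 2}, {(0, 1): 10, (1, 2): 20, (0, 2): 30})
    mst = toy_graph.prims_algorithm()
    assert sum(mst.edges.values()) == 30  # keeps the edges of weight 10 and 20
    assert sum(toy_graph.edges.values()) - sum(mst.edges.values()) == 30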
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a space-optimised row of Pascal's triangle."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row, updating right-to-left so
        # each entry is overwritten exactly once per row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
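# --- Sanity check (illustrative, not in the original file): the routine fills
# one row of Pascal's triangle in place via C(n, r) = C(n-1, r-1) + C(n-1, r);
# math.comb gives an independent reference value.
def _check_binomial() -> None:
    from math import comb

    assert binomial_coefficient(10, 5) == comb(10, 5) == 252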
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check that the card number starts with a known issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print a diagnostic and return whether the card number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
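# --- Worked example (illustrative, not in the original file): for
# "4111111111111111", doubling every second digit from the right turns the
# leading 4 into 8 and seven of the 1s into 2s, so the checksum is
# 8 + 7 * 2 + 8 * 1 = 30, and 30 % 10 == 0:
def _demo_luhn() -> None:
    assert luhn_validation("4111111111111111")
    assert not luhn_validation("4111111111111112")  # total becomes 31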
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"""{key:21} {value}""")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
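# --- Illustrative note (not in the original file): `timeit` takes the
# statement and setup as strings because it compiles and runs them in a fresh
# namespace; that is why `benchmark_function` above builds them with f-strings.
# A minimal standalone call of the same shape:
def _demo_timeit() -> float:
    from timeit import timeit as _timeit

    return _timeit(stmt="'racecar' == 'racecar'[::-1]", number=10_000)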
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
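# --- Illustrative sketch (not part of the tokenizer above): ESM tokenization
# is whitespace splitting plus two dictionary lookups, with unknown residues
# falling back to <unk>. A toy vocabulary makes the mapping concrete:
def _toy_esm_ids() -> list:
    toy_vocab = ["<cls>", "<pad>", "<eos>", "<unk>", "A", "G", "V"]  # hypothetical
    token_to_id = {tok: i for i, tok in enumerate(toy_vocab)}
    return [token_to_id.get(tok, token_to_id["<unk>"]) for tok in "A G V X".split()]
    # -> [4, 5, 6, 3]; "X" is not in the toy vocabulary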
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext: list[int] = [int(number) for number in data.strip().split(",")]

    possibles: list[str] = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text: str = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'{solution() = }')
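# --- Illustrative check (not in the original file): the brute-force attack
# works because XOR is self-inverse, (c ^ k) ^ k == c, so trying each key and
# keeping only printable output recovers the plaintext:
def _demo_xor_roundtrip() -> None:
    key = ord("g")  # hypothetical single-byte key
    cipher = [ord(ch) ^ key for ch in "exp"]
    assert "".join(chr(c ^ key) for c in cipher) == "exp"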
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum Fourier transform circuit on `number_of_qubits` qubits."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
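# --- Illustrative note (not in the original file): for the all-|0> input the
# QFT yields a uniform superposition, so the 10_000 shots should split roughly
# evenly across all 2**n bitstrings (about 1_250 each for n = 3):
def _expected_counts_per_state(number_of_qubits: int = 3, shots: int = 10_000) -> float:
    return shots / 2**number_of_qubits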
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same when its digits are reversed."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
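# --- Worked example (illustrative): reversing 121 digit by digit gives
# rev = 1, then 12, then 121, which equals the original, so it is a palindrome.
def _demo_reverse_palindrome() -> None:
    assert is_palindrome(121)
    assert not is_palindrome(123)  # reversed digits give 321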
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = NllbTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
@require_torch
def A_ ( self : Tuple ):
if not self.test_seqaseq:
return
SCREAMING_SNAKE_CASE__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
SCREAMING_SNAKE_CASE__ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE__ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
SCREAMING_SNAKE_CASE__ = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCAmelCase_ , tgt_texts=UpperCAmelCase_ , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
SCREAMING_SNAKE_CASE__ = tokenizer.prepare_seqaseq_batch(
UpperCAmelCase_ , tgt_texts=UpperCAmelCase_ , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
SCREAMING_SNAKE_CASE__ = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCAmelCase_ , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , UpperCAmelCase_ )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def A_ ( self : List[Any] ):
pass
def A_ ( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE__ = [AddedToken('<special>' , lstrip=UpperCAmelCase_ )]
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_r.encode('Hey this is a <special> token' )
SCREAMING_SNAKE_CASE__ = tokenizer_r.encode('<special>' , add_special_tokens=UpperCAmelCase_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(
UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer_p.encode('Hey this is a <special> token' )
SCREAMING_SNAKE_CASE__ = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def A_ ( self : int ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 256057 )
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
def A_ ( self : Dict ):
self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids )
# fmt: off
SCREAMING_SNAKE_CASE__ = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ )
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : Optional[Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [256203, 3] )
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = NllbTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ )
@require_torch
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = targets['input_ids']
SCREAMING_SNAKE_CASE__ = shift_tokens_right(
UpperCAmelCase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[256047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 256057,
} , )
@require_torch
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
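# --- Illustrative sketch (not part of the test file above): the tests build
# decoder inputs with `shift_tokens_right`, which prepends the decoder start
# token (a language code here) and drops the last label. A simplified,
# list-based version of that idea (the real function operates on tensors and
# also replaces -100 padding with the pad token):
def _shift_tokens_right_sketch(labels: list, decoder_start_token_id: int) -> list:
    return [decoder_start_token_id] + labels[:-1]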
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
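# --- Illustrative sketch (not part of the test above): the two assertions
# encode BERT-style special-token layouts, [CLS] A [SEP] for one sequence and
# [CLS] A [SEP] B [SEP] for a pair:
def _with_special_tokens(a, b, cls_id, sep_id):
    ids = [cls_id] + a + [sep_id]
    if b is not None:
        ids += b + [sep_id]
    return ids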
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """
    Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
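# --- Illustrative note (not part of the processor above):
# `get_overflowing_images` is just a re-indexing by `overflow_to_sample_mapping`,
# so each overflowed chunk keeps a reference to its source page image:
def _map_overflowing_images(images, overflow_to_sample_mapping):
    return [images[sample_idx] for sample_idx in overflow_to_sample_mapping]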
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
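# --- Illustrative sketch (not the transformers implementation): this file and
# the DPT __init__ above follow the same lazy-import pattern, where the module
# object is swapped for a proxy that resolves names from `_import_structure`
# on first attribute access. A stripped-down version of the idea:
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Resolve the attribute lazily from the submodule that declares it.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)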
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Parity-based value: -1 for an odd number of prime factors, 1 for even."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
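# --- Illustrative check (not in the original file): for square-free inputs
# the parity test above matches the Möbius function, mu(n) = (-1)^k with k
# prime factors:
def _demo_mobius() -> None:
    assert mobius(2) == -1  # one prime factor
    assert mobius(6) == 1  # 2 * 3, two prime factors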
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__A : List[str] = 0
__A : List[str] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__A : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__A : Tuple = tuple[int, int]
class __A :
def __init__( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Node | None , ):
lowerCAmelCase : Union[str, Any] = pos_x
lowerCAmelCase : Dict = pos_y
lowerCAmelCase : Dict = (pos_y, pos_x)
lowerCAmelCase : List[str] = goal_x
lowerCAmelCase : List[str] = goal_y
lowerCAmelCase : int = g_cost
lowerCAmelCase : str = parent
lowerCAmelCase : str = self.calculate_heuristic()
lowerCAmelCase : int = self.g_cost + self.h_cost
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = self.pos_x - self.goal_x
lowerCAmelCase : int = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(UpperCAmelCase_ ) + abs(UpperCAmelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Tuple , UpperCAmelCase_ : Node ):
return self.f_cost < other.f_cost
class __A :
def __init__( self : Tuple , UpperCAmelCase_ : TPosition , UpperCAmelCase_ : TPosition ):
lowerCAmelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCAmelCase_ )
lowerCAmelCase : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , UpperCAmelCase_ )
lowerCAmelCase : Any = [self.start]
lowerCAmelCase : list[Node] = []
lowerCAmelCase : Tuple = False
def lowercase__ ( self : Any ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCAmelCase : Union[str, Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(UpperCAmelCase_ )
self.closed_nodes.append(UpperCAmelCase_ )
lowerCAmelCase : Dict = self.get_successors(UpperCAmelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCAmelCase_ )
else:
# retrieve the best current path
lowerCAmelCase : str = self.open_nodes.pop(self.open_nodes.index(UpperCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCAmelCase_ )
else:
self.open_nodes.append(UpperCAmelCase_ )
return [self.start.pos]
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : Node ):
lowerCAmelCase : Optional[Any] = []
for action in delta:
lowerCAmelCase : Union[str, Any] = parent.pos_x + action[1]
lowerCAmelCase : List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCAmelCase_ , UpperCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCAmelCase_ , ) )
return successors
def lowercase__ ( self : Dict , UpperCAmelCase_ : Node | None ):
lowerCAmelCase : int = node
lowerCAmelCase : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCAmelCase : List[str] = current_node.parent
path.reverse()
return path
class __A :
def __init__( self : int , UpperCAmelCase_ : TPosition , UpperCAmelCase_ : TPosition ):
lowerCAmelCase : List[str] = AStar(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[str] = AStar(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Tuple = False
def lowercase__ ( self : Union[str, Any] ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowerCAmelCase : Optional[int] = self.fwd_astar.open_nodes.pop(0 )
lowerCAmelCase : Any = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
UpperCAmelCase_ , UpperCAmelCase_ )
self.fwd_astar.closed_nodes.append(UpperCAmelCase_ )
self.bwd_astar.closed_nodes.append(UpperCAmelCase_ )
lowerCAmelCase : str = current_bwd_node
lowerCAmelCase : List[Any] = current_fwd_node
lowerCAmelCase : List[str] = {
self.fwd_astar: self.fwd_astar.get_successors(UpperCAmelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(UpperCAmelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(UpperCAmelCase_ )
else:
# retrieve the best current path
lowerCAmelCase : int = astar.open_nodes.pop(
astar.open_nodes.index(UpperCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(UpperCAmelCase_ )
else:
astar.open_nodes.append(UpperCAmelCase_ )
return [self.fwd_astar.start.pos]
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Node , UpperCAmelCase_ : Node ):
lowerCAmelCase : List[str] = self.fwd_astar.retrace_path(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = self.bwd_astar.retrace_path(UpperCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
lowerCAmelCase : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__A : Optional[int] = (0, 0)
__A : int = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__A : Optional[Any] = time.time()
__A : List[Any] = AStar(init, goal)
__A : List[str] = a_star.search()
__A : Union[str, Any] = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
__A : Union[str, Any] = time.time()
__A : Dict = BidirectionalAStar(init, goal)
__A : List[Any] = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
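# --- Illustrative check (not in the original file): with HEURISTIC = 0 the
# heuristic is the Euclidean distance, which never exceeds the true
# 4-connected path cost, so A* remains admissible. Quick numeric check for
# the start cell of the grid above:
def _heuristic_admissible_at_start() -> bool:
    manhattan = (len(grid) - 1) + (len(grid[0]) - 1)  # obstacle-free move count
    euclidean = sqrt((len(grid) - 1) ** 2 + (len(grid[0]) - 1) ** 2)
    return euclidean <= manhattan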
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
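# --- Usage sketch (illustrative, not in the original file): `attribute_map`
# lets generic config names resolve to the CTRL-specific fields, e.g.
# `hidden_size` -> `n_embd`:
def _demo_ctrl_config() -> None:
    config = CTRLConfig()
    assert config.hidden_size == config.n_embd == 1280
    assert config.num_hidden_layers == config.n_layer == 48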
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
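# --- Example of the markdown produced (hypothetical story data, no network
# call involved):
def _demo_markdown() -> str:
    stories = [{"title": "Show HN: Example", "url": "https://example.com"}]
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
    # -> "* [Show HN: Example](https://example.com)"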
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline