# File: optimum-main/docs/combine_docs.py
import argparse
import shutil
from pathlib import Path
from typing import Dict, List

import yaml

SUBPACKAGE_TOC_INSERT_INDEX = 2

parser = argparse.ArgumentParser(description='Script to combine doc builds from subpackages with base doc build of Optimum. Assumes all subpackage doc builds are present in the root of the `optimum` repo.')
parser.add_argument('--subpackages', nargs='+', help='Subpackages to integrate docs with Optimum. Use hardware partner names like `habana`, `graphcore`, or `intel`')
parser.add_argument('--version', type=str, default='main', help='The version of the Optimum docs')


def rename_subpackage_toc(subpackage: str, toc: Dict):
    for item in toc:
        for file in item['sections']:
            if 'local' in file:
                file['local'] = f'{subpackage}/' + file['local']
            else:
                rename_subpackage_toc(subpackage, [file])


def rename_copy_subpackage_html_paths(subpackage: str, subpackage_path: Path, optimum_path: Path, version: str):
    subpackage_html_paths = list(subpackage_path.rglob('*.html'))
    language_folder_level = 3
    for html_path in subpackage_html_paths:
        language_folder = html_path.parts[language_folder_level]
        relative_path_from_language_folder = Path(*html_path.parts[language_folder_level + 1:])
        new_path_in_optimum = Path(f'{optimum_path}/optimum/{version}/{language_folder}/{subpackage}/{relative_path_from_language_folder}')
        new_path_in_optimum.parent.mkdir(parents=True, exist_ok=True)
        shutil.copyfile(html_path, new_path_in_optimum)


def add_neuron_doc(base_toc: List):
    base_toc.insert(SUBPACKAGE_TOC_INSERT_INDEX, {'sections': [{'local': 'docs/optimum-neuron/index', 'title': '🤗 Optimum Neuron'}], 'title': 'AWS Trainium/Inferentia', 'isExpanded': False})


def add_tpu_doc(base_toc: List):
    base_toc.insert(SUBPACKAGE_TOC_INSERT_INDEX, {'sections': [{'local': 'docs/optimum-tpu/index', 'title': '🤗 Optimum-TPU'}], 'title': 'Google TPUs', 'isExpanded': False})


def main():
    args = parser.parse_args()
    optimum_path = Path('optimum-doc-build')
    base_toc_path = next(optimum_path.rglob('_toctree.yml'))
    with open(base_toc_path, 'r') as f:
        base_toc = yaml.safe_load(f)
    for subpackage in args.subpackages[::-1]:
        if subpackage == 'neuron':
            add_neuron_doc(base_toc)
        elif subpackage == 'tpu':
            add_tpu_doc(base_toc)
        elif subpackage == 'nvidia':
            continue
        else:
            subpackage_path = Path(f'{subpackage}-doc-build')
            if subpackage == 'furiosa' and (not subpackage_path.is_dir()):
                continue
            rename_copy_subpackage_html_paths(subpackage, subpackage_path, optimum_path, args.version)
            subpackage_toc_path = next(subpackage_path.rglob('_toctree.yml'))
            with open(subpackage_toc_path, 'r') as f:
                subpackage_toc = yaml.safe_load(f)
            rename_subpackage_toc(subpackage, subpackage_toc)
            if subpackage == 'amd':
                subpackage_toc[0]['title'] = subpackage_toc[0]['title'].split('Optimum-')[-1]
            else:
                subpackage_toc[0]['title'] = subpackage_toc[0]['title'].split('Optimum ')[-1]
            if subpackage != 'graphcore':
                base_toc.insert(SUBPACKAGE_TOC_INSERT_INDEX, subpackage_toc[0])
    with open(base_toc_path, 'w') as f:
        yaml.safe_dump(base_toc, f, allow_unicode=True)


if __name__ == '__main__':
    main()

# File: optimum-main/optimum/bettertransformer/models/__init__.py
import warnings

from ...utils.import_utils import check_if_transformers_greater
from .decoder_models import BarkAttentionLayerBetterTransformer, BartAttentionLayerBetterTransformer, BlenderbotAttentionLayerBetterTransformer, BloomAttentionLayerBetterTransformer, CodegenAttentionLayerBetterTransformer, GPT2AttentionLayerBetterTransformer, GPTJAttentionLayerBetterTransformer,
GPTNeoAttentionLayerBetterTransformer, GPTNeoXAttentionLayerBetterTransformer, M2M100AttentionLayerBetterTransformer, MarianAttentionLayerBetterTransformer, OPTAttentionLayerBetterTransformer, PegasusAttentionLayerBetterTransformer, T5AttentionLayerBetterTransformer from .encoder_models import AlbertLayerBetterTransformer, BartEncoderLayerBetterTransformer, BertLayerBetterTransformer, CLIPLayerBetterTransformer, DistilBertLayerBetterTransformer, FSMTEncoderLayerBetterTransformer, MBartEncoderLayerBetterTransformer, ProphetNetEncoderLayerBetterTransformer, ViltLayerBetterTransformer, ViTLayerBetterTransformer, Wav2Vec2EncoderLayerBetterTransformer class BetterTransformerManager: MODEL_MAPPING = {'albert': {'AlbertLayer': AlbertLayerBetterTransformer}, 'bark': {'BarkSelfAttention': BarkAttentionLayerBetterTransformer}, 'bart': {'BartEncoderLayer': BartEncoderLayerBetterTransformer, 'BartAttention': BartAttentionLayerBetterTransformer}, 'bert': {'BertLayer': BertLayerBetterTransformer}, 'bert-generation': {'BertGenerationLayer': BertLayerBetterTransformer}, 'blenderbot': {'BlenderbotAttention': BlenderbotAttentionLayerBetterTransformer}, 'bloom': {'BloomAttention': BloomAttentionLayerBetterTransformer}, 'camembert': {'CamembertLayer': BertLayerBetterTransformer}, 'blip-2': {'T5Attention': T5AttentionLayerBetterTransformer}, 'clip': {'CLIPEncoderLayer': CLIPLayerBetterTransformer}, 'codegen': {'CodeGenAttention': CodegenAttentionLayerBetterTransformer}, 'data2vec-text': {'Data2VecTextLayer': BertLayerBetterTransformer}, 'deit': {'DeiTLayer': ViTLayerBetterTransformer}, 'distilbert': {'TransformerBlock': DistilBertLayerBetterTransformer}, 'electra': {'ElectraLayer': BertLayerBetterTransformer}, 'ernie': {'ErnieLayer': BertLayerBetterTransformer}, 'fsmt': {'EncoderLayer': FSMTEncoderLayerBetterTransformer}, 'gpt2': {'GPT2Attention': GPT2AttentionLayerBetterTransformer}, 'gptj': {'GPTJAttention': GPTJAttentionLayerBetterTransformer}, 'gpt_neo': {'GPTNeoSelfAttention': GPTNeoAttentionLayerBetterTransformer}, 'gpt_neox': {'GPTNeoXAttention': GPTNeoXAttentionLayerBetterTransformer}, 'hubert': {'HubertEncoderLayer': Wav2Vec2EncoderLayerBetterTransformer}, 'layoutlm': {'LayoutLMLayer': BertLayerBetterTransformer}, 'm2m_100': {'M2M100EncoderLayer': MBartEncoderLayerBetterTransformer, 'M2M100Attention': M2M100AttentionLayerBetterTransformer}, 'marian': {'MarianEncoderLayer': BartEncoderLayerBetterTransformer, 'MarianAttention': MarianAttentionLayerBetterTransformer}, 'markuplm': {'MarkupLMLayer': BertLayerBetterTransformer}, 'mbart': {'MBartEncoderLayer': MBartEncoderLayerBetterTransformer}, 'opt': {'OPTAttention': OPTAttentionLayerBetterTransformer}, 'pegasus': {'PegasusAttention': PegasusAttentionLayerBetterTransformer}, 'rembert': {'RemBertLayer': BertLayerBetterTransformer}, 'prophetnet': {'ProphetNetEncoderLayer': ProphetNetEncoderLayerBetterTransformer}, 'roberta': {'RobertaLayer': BertLayerBetterTransformer}, 'roc_bert': {'RoCBertLayer': BertLayerBetterTransformer}, 'roformer': {'RoFormerLayer': BertLayerBetterTransformer}, 'splinter': {'SplinterLayer': BertLayerBetterTransformer}, 'tapas': {'TapasLayer': BertLayerBetterTransformer}, 't5': {'T5Attention': T5AttentionLayerBetterTransformer}, 'vilt': {'ViltLayer': ViltLayerBetterTransformer}, 'vit': {'ViTLayer': ViTLayerBetterTransformer}, 'vit_mae': {'ViTMAELayer': ViTLayerBetterTransformer}, 'vit_msn': {'ViTMSNLayer': ViTLayerBetterTransformer}, 'wav2vec2': {'Wav2Vec2EncoderLayer': Wav2Vec2EncoderLayerBetterTransformer, 
'Wav2Vec2EncoderLayerStableLayerNorm': Wav2Vec2EncoderLayerBetterTransformer}, 'xlm-roberta': {'XLMRobertaLayer': BertLayerBetterTransformer}, 'yolos': {'YolosLayer': ViTLayerBetterTransformer}}

    OVERWRITE_METHODS = {}

    EXCLUDE_FROM_TRANSFORM = {'clip': ['text_model'], 'blip-2': ['qformer.encoder.layer', 'vision_model.encoder.layers'], 'bark': ['codec_model.encoder.layers']}

    CAN_NOT_BE_SUPPORTED = {'deberta-v2': "DeBERTa v2 does not use a regular attention mechanism, which is not supported in PyTorch's BetterTransformer.", 'glpn': "GLPN has a convolutional layer present in the FFN network, which is not supported in PyTorch's BetterTransformer."}

    NOT_REQUIRES_NESTED_TENSOR = {'bark', 'blenderbot', 'bloom', 'codegen', 'gpt2', 'gptj', 'gpt_neo', 'gpt_neox', 'opt', 'pegasus', 't5'}

    NOT_REQUIRES_STRICT_VALIDATION = {'blenderbot', 'blip-2', 'bloom', 'codegen', 'gpt2', 'gptj', 'gpt_neo', 'gpt_neox', 'opt', 'pegasus', 't5'}

    @staticmethod
    def cannot_support(model_type: str) -> bool:
        return model_type in BetterTransformerManager.CAN_NOT_BE_SUPPORTED

    @staticmethod
    def supports(model_type: str) -> bool:
        return model_type in BetterTransformerManager.MODEL_MAPPING

    @staticmethod
    def requires_nested_tensor(model_type: str) -> bool:
        return model_type not in BetterTransformerManager.NOT_REQUIRES_NESTED_TENSOR

    @staticmethod
    def requires_strict_validation(model_type: str) -> bool:
        return model_type not in BetterTransformerManager.NOT_REQUIRES_STRICT_VALIDATION


class warn_uncompatible_save(object):
    def __init__(self, callback):
        self.callback = callback

    def __enter__(self):
        return self

    def __exit__(self, ex_typ, ex_val, traceback):
        return True

    def __call__(self, *args, **kwargs):
        warnings.warn('You are calling `save_pretrained` on a `BetterTransformer`-converted model; you may encounter unexpected behaviors.', UserWarning)
        return self.callback(*args, **kwargs)

# File: optimum-main/optimum/bettertransformer/models/attention.py
from typing import Optional, Tuple

import torch
import torch.nn.functional as F

from ...utils import check_if_transformers_greater


def raise_on_head_mask(head_mask: Optional[torch.Tensor]):
    if head_mask is not None:
        raise ValueError('layer_head_mask (or head_mask) different than None is unsupported for now with BetterTransformer, please open a PR or an issue at https://github.com/huggingface/optimum.')


def gpt2_wrapped_scaled_dot_product(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None):
    raise_on_head_mask(head_mask)
    batch_size = query.shape[0]
    mask_value = torch.finfo(value.dtype).min
    mask_value = torch.full([], mask_value, dtype=value.dtype)
    if self.downcast_qk:
        query = query.to(value.dtype)
        key = key.to(value.dtype)
    if batch_size == 1 and attention_mask is not None and (attention_mask[0, 0, -1, -1] < -1):
        raise ValueError("BetterTransformer does not support padding='max_length' with a batch size of 1.")
    dropout_p = self.dropout_prob_attn if self.training else 0.0
    if batch_size == 1 or self.training:
        if query.shape[2] > 1:
            sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=dropout_p, is_causal=True)
        else:
            sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=dropout_p, is_causal=False)
    else:
        (query_length, key_length) = (query.size(-2), key.size(-2))
        if query_length > 1:
            causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length].to(torch.bool)
            causal_mask = torch.where(causal_mask, 0, mask_value)
            causal_mask = causal_mask.expand(batch_size, -1, -1, -1)
            if attention_mask is not None:
                attention_mask = causal_mask + attention_mask
        sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=dropout_p, is_causal=False)
    if self.downcast_qk:
        sdpa_result = sdpa_result.to(value.dtype)
    return (sdpa_result, None)


def bark_wrapped_scaled_dot_product(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None):
    raise_on_head_mask(head_mask)
    is_causal = self.is_causal and query.shape[2] != 1
    sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=self.dropout if self.training else 0.0, is_causal=is_causal)
    return (sdpa_result, None)


def gpt_neo_wrapped_scaled_dot_product(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None):
    raise_on_head_mask(head_mask)
    query = query * self.scale
    batch_size = query.shape[0]
    mask_value = torch.finfo(value.dtype).min
    mask_value = torch.full([], mask_value, dtype=value.dtype)
    if batch_size == 1 and attention_mask is not None and (attention_mask[0, 0, -1, -1] < -1):
        raise ValueError("BetterTransformer does not support padding='max_length' with a batch size of 1.")
    dropout_p = self.dropout_prob_attn if self.training else 0.0
    if (batch_size == 1 or self.training) and self.attention_type == 'global':
        if query.shape[2] > 1:
            sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=dropout_p, is_causal=True)
        else:
            sdpa_result = torch.nn.functional.scaled_dot_product_attention(query,
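
# --- Editorial aside (not part of the original module) ----------------------
# A minimal usage sketch of how the mapping above is typically exercised,
# assuming the public `BetterTransformer` entry point exposed by
# `optimum.bettertransformer` (it is not defined in this file) and a
# checkpoint name chosen purely for illustration.
if __name__ == '__main__':
    from transformers import AutoModel

    from optimum.bettertransformer import BetterTransformer

    model = AutoModel.from_pretrained('bert-base-uncased')
    # Swaps e.g. `BertLayer` for `BertLayerBetterTransformer` according to MODEL_MAPPING.
    bt_model = BetterTransformer.transform(model)
    # `reverse` restores the original Transformers modules, e.g. before saving.
    model = BetterTransformer.reverse(bt_model)
# -----------------------------------------------------------------------------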
key, value, attn_mask=None, dropout_p=dropout_p, is_causal=False) else: (query_length, key_length) = (query.size(-2), key.size(-2)) causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length] causal_mask = torch.where(causal_mask, 0, mask_value) if batch_size > 1: causal_mask = causal_mask.expand(batch_size, -1, -1, -1) if attention_mask is not None: attention_mask = causal_mask + attention_mask sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=dropout_p, is_causal=False) return (sdpa_result, None) def codegen_wrapped_scaled_dot_product(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None): raise_on_head_mask(head_mask) batch_size = query.shape[0] mask_value = torch.finfo(value.dtype).min mask_value = torch.full([], mask_value, dtype=value.dtype) if batch_size == 1 and attention_mask is not None and (attention_mask[0, 0, -1, -1] < -1): raise ValueError("BetterTransformer does not support padding='max_length' with a batch size of 1.") query = query.to(value.dtype) key = key.to(value.dtype) dropout_p = self.dropout_prob_attn if self.training else 0.0 if batch_size == 1 or self.training: if query.shape[2] > 1: sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=dropout_p, is_causal=True) else: sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=dropout_p, is_causal=False) else: (query_length, key_length) = (query.size(-2), key.size(-2)) if query_length > 1: causal_mask = self.causal_mask[:, :, key_length - query_length:key_length, :key_length].to(torch.bool) causal_mask = torch.where(causal_mask, 0, mask_value) causal_mask = causal_mask.expand(batch_size, -1, -1, -1) attention_mask = torch.min(causal_mask, attention_mask) sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=dropout_p, is_causal=False) return (sdpa_result, None) def opt_forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: raise_on_head_mask(layer_head_mask) if output_attentions is True: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') is_cross_attention = key_value_states is not None (batch_size, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None: key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) value_states = self._shape(self.v_proj(key_value_states), -1, batch_size) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states), -1, batch_size) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states), -1, batch_size) if self.is_decoder: 
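        # Editorial note (added; illustration only): as in the other wrappers in this
        # file, the code below drops the explicit attention mask whenever the batch
        # holds a single sequence or the module is training, and instead lets
        # torch.nn.functional.scaled_dot_product_attention enforce causality itself
        # (in effect `is_causal=query_states.shape[2] > 1`); the masked SDPA path is
        # only taken for padded multi-sequence batches.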
past_key_value = (key_states, value_states) query_states = self._shape(query_states, tgt_len, batch_size) query_states = query_states * self.scale dropout_p = self.dropout if self.training else 0.0 if batch_size == 1 or self.training: if query_states.shape[2] > 1: attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=None, dropout_p=dropout_p, is_causal=True) else: attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=None, dropout_p=dropout_p, is_causal=False) else: attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=dropout_p, is_causal=False) if attn_output.size() != (batch_size, self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, None, past_key_value) def t5_forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, **kwargs): raise_on_head_mask(layer_head_mask) if output_attentions is True: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if len(self.pruned_heads) > 0: raise ValueError(f'Setting `pruned_heads` is unsupported with BetterTransformer, found {self.pruned_heads}.') (batch_size, seq_length) = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert len(past_key_value) == 2, f'past_key_value should have 2 past states: keys and values. 
Got {len(past_key_value)} past states' real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): if key_value_states is None: hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: hidden_states = shape(proj_layer(key_value_states)) else: hidden_states = past_key_value return hidden_states query_states = shape(self.q(hidden_states)) key_states = project(hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None) value_states = project(hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None) dropout_p = self.dropout if self.training else 0.0 query_states = self.scale * query_states if position_bias is None and (not self.has_relative_attention_bias): if mask is None: attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=None, dropout_p=dropout_p, is_causal=False) elif mask is not None: attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=mask, dropout_p=dropout_p, is_causal=False) if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros((1, self.n_heads, real_seq_length, key_length), device=value_states.device, dtype=value_states.dtype) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=value_states.device) if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1):, :] if mask is not None: position_bias = position_bias + mask if self.has_relative_attention_bias: attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=position_bias, dropout_p=dropout_p, is_causal=False) else: attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=position_bias, dropout_p=dropout_p, is_causal=False) attn_output = unshape(attn_output) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if self.is_decoder and use_cache else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) return outputs def bart_forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: raise_on_head_mask(layer_head_mask) if output_attentions is True: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = 
self.q_proj(hidden_states) if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) query_states = self._shape(query_states, tgt_len, bsz) key_states = key_states value_states = value_states attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout if self.training else 0.0, is_causal=False) if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, None, past_key_value) if check_if_transformers_greater('4.44'): from transformers.cache_utils import Cache from transformers.models.bloom.modeling_bloom import dropout_add def bloom_forward(self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None): raise_on_head_mask(head_mask) if output_attentions is True: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') (batch_size, q_length, _) = hidden_states.shape fused_qkv = self.query_key_value(hidden_states) (query_layer, key_layer, value_layer) = self._reshape(fused_qkv) if layer_past is not None: cache_kwargs = {'cache_position': cache_position} (key_layer, value_layer) = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs) alibi = alibi.reshape(batch_size, -1, *alibi.shape[1:]) if attention_mask is not None: kv_length = cache_position[-1] + 1 causal_mask = attention_mask[:, :, :, :kv_length] alibi = torch.masked_fill(alibi, causal_mask.bool(), torch.finfo(alibi.dtype).min) context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=alibi, dropout_p=self.dropout_prob_attn if self.training else 0.0) context_layer = context_layer.transpose(1, 2) context_layer = context_layer.reshape(batch_size, q_length, self.hidden_size) if self.pretraining_tp > 1 and self.slow_but_exact: slices = self.hidden_size / self.pretraining_tp output_tensor = torch.zeros_like(context_layer) for i in range(self.pretraining_tp): output_tensor = output_tensor + F.linear(context_layer[:, :, int(i * slices):int((i + 1) * slices)], self.dense.weight[:, int(i * slices):int((i + 1) * slices)]) else: output_tensor = self.dense(context_layer) output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training) outputs = (output_tensor, 
layer_past) return outputs else: def bloom_forward(self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, **kwargs): raise_on_head_mask(head_mask) if output_attentions is True: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') fused_qkv = self.query_key_value(hidden_states) (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) (batch_size, q_length, _, _) = query_layer.shape query_layer = query_layer.transpose(1, 2) if layer_past is not None: (past_key, past_value) = layer_past past_key = past_key.transpose(1, 2) key_layer = key_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim) value_layer = value_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim) key_layer = torch.cat((past_key, key_layer), dim=1) value_layer = torch.cat((past_value, value_layer), dim=1) key_layer = key_layer.reshape(batch_size, self.num_heads, *key_layer.shape[1:]) value_layer = value_layer.reshape(batch_size, self.num_heads, *value_layer.shape[1:]) else: key_layer = key_layer.transpose(1, 2) value_layer = value_layer.transpose(1, 2) alibi = alibi.reshape(batch_size, -1, *alibi.shape[1:]) alibi = torch.masked_fill(alibi, attention_mask, torch.finfo(alibi.dtype).min) context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=alibi, dropout_p=self.dropout_prob_attn if self.training else 0.0) context_layer = context_layer.transpose(1, 2) context_layer = context_layer.reshape(*context_layer.shape[:2], -1) if self.pretraining_tp > 1 and self.slow_but_exact: slices = self.hidden_size / self.pretraining_tp output_tensor = torch.zeros_like(context_layer) for i in range(self.pretraining_tp): output_tensor = output_tensor + torch.nn.functional.linear(context_layer[:, :, int(i * slices):int((i + 1) * slices)], self.dense.weight[:, int(i * slices):int((i + 1) * slices)]) else: output_tensor = self.dense(context_layer) output_tensor = torch.nn.functional.dropout(output_tensor, p=self.hidden_dropout, training=self.training) output_tensor = residual + output_tensor if use_cache is True: present = (key_layer.reshape(-1, *key_layer.shape[2:]).transpose(1, 2), value_layer.reshape(-1, *value_layer.shape[2:])) else: present = None return (output_tensor, present) # File: optimum-main/optimum/bettertransformer/models/base.py from typing import TYPE_CHECKING if TYPE_CHECKING: from transformers import PretrainedConfig import torch from ...utils import logging, recurse_getattr, recurse_setattr KNOWN_ACTIVATION_ATTRIBUTES = ['hidden_act', 'activation', 'act_fn', 'activation_function'] KNOWN_POS_EMB_ATTRIBUTES = ['position_embedding_type'] KNOWN_NUM_LAYERS = ['num_hidden_layers', 'num_layers', 'encoder_layers', 'n_layers'] SUPPORTED_ACTIVATION_FUNCTIONS = ['gelu', 'relu', 'gelu_new'] USE_AT_OWN_RISK_ACTIVATION_FUNCTIONS = ['quick_gelu'] logger = logging.get_logger(__name__) class BetterTransformerBaseLayer: def __init__(self, config: 'PretrainedConfig'): self.norm_first = False self.use_gelu = False self.act_fn = None self.pos_emb_type = None self.num_heads = None self.embed_dim = None self.num_layers = None self.original_layers_mapping = {} self.module_mapping = None self.keys_to_ignore = [] for attr in KNOWN_ACTIVATION_ATTRIBUTES: if hasattr(config, attr): self.act_fn 
= getattr(config, attr) break if self.act_fn is None and hasattr(self, '_get_activation_function'): self.act_fn = self._get_activation_function(config) for attr in KNOWN_POS_EMB_ATTRIBUTES: if hasattr(config, attr): self.pos_emb_type = getattr(config, attr) break for attr in KNOWN_NUM_LAYERS: if hasattr(config, attr): self.num_layers = getattr(config, attr) break def validate_bettertransformer(self): if self.num_heads is None: raise ValueError('Number of heads not set for `BetterTransformer` integration.') if self.embed_dim is None: raise ValueError('Embedding dimension not set for `BetterTransformer` integration.') if self.norm2_eps is None or self.norm1_eps is None: raise ValueError('`norm2_eps` and `norm1_eps` not set for `BetterTransformer` integration.') if self.pos_emb_type is not None and self.pos_emb_type != 'absolute': raise ValueError(f'Positional embedding type {self.pos_emb_type} not supported for `BetterTransformer` integration') if self.norm1_eps != self.norm2_eps: raise ValueError('norm1_eps and norm2_eps must be equal for `BetterTransformer` integration.') if self.act_fn in USE_AT_OWN_RISK_ACTIVATION_FUNCTIONS: logger.warning(f'Overridding {self.act_fn} activation with gelu. Use the transformed model at your own risk, the output logits could be significantly different.') self.act_fn = 'gelu' elif self.act_fn not in SUPPORTED_ACTIVATION_FUNCTIONS: raise ValueError(f'Activation function {self.act_fn} not supported for `BetterTransformer` integration.') self.use_gelu = self.act_fn == 'gelu' or self.act_fn == 'gelu_new' if self.num_heads % 2 == 1: raise ValueError(f'Number of heads {self.num_heads} is not supported for `BetterTransformer` integration. Number of heads must be even.') def _revert(self, module: torch.nn.Module) -> torch.nn.Module: if self.module_mapping is not None: if '' in self.module_mapping.values(): for (bt_module_attr_name, value) in self.module_mapping.items(): if value == '': module = getattr(self, bt_module_attr_name) return module else: raise NotImplementedError('replacing a submodule in revert is not supported') for (modified_layer_key_names, original_layer_key_names) in self.original_layers_mapping.items(): if isinstance(original_layer_key_names, list): current_weight = getattr(self, modified_layer_key_names) split_index = current_weight.shape[0] // len(original_layer_key_names) for (i, subparam_name) in enumerate(original_layer_key_names): if recurse_getattr(module, subparam_name) is None: continue if module not in self.keys_to_ignore: parameter = current_weight[i * split_index:(i + 1) * split_index].clone() if isinstance(recurse_getattr(module, subparam_name), torch.nn.Parameter): parameter = torch.nn.Parameter(parameter) recurse_setattr(module, subparam_name, parameter) elif isinstance(original_layer_key_names, str): if recurse_getattr(module, original_layer_key_names) is None: continue parameter = getattr(self, modified_layer_key_names) if isinstance(recurse_getattr(module, original_layer_key_names), torch.nn.Parameter): parameter = torch.nn.Parameter(parameter) recurse_setattr(module, original_layer_key_names, parameter) else: raise ValueError(f'Invalid type {type(modified_layer_key_names)} for `original_layers_mapping`', ' please use either `str` or `list`.') return module # File: optimum-main/optimum/bettertransformer/models/decoder_models.py from typing import TYPE_CHECKING import torch import torch.nn as nn from transformers.models.bart.modeling_bart import BartAttention from transformers.models.blenderbot.modeling_blenderbot import 
BlenderbotAttention from transformers.models.bloom.modeling_bloom import BloomAttention from transformers.models.codegen.modeling_codegen import CodeGenAttention from transformers.models.gpt2.modeling_gpt2 import GPT2Attention from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoSelfAttention from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXAttention from transformers.models.gptj.modeling_gptj import GPTJAttention from transformers.models.m2m_100.modeling_m2m_100 import M2M100Attention from transformers.models.marian.modeling_marian import MarianAttention from transformers.models.opt.modeling_opt import OPTAttention from transformers.models.pegasus.modeling_pegasus import PegasusAttention from transformers.models.t5.modeling_t5 import T5Attention from ...utils.import_utils import check_if_transformers_greater if check_if_transformers_greater('4.31'): from transformers.models.bark.modeling_bark import BarkSelfAttention else: from ...utils.dummy_bettertransformer_objects import BarkSelfAttention from .attention import bark_wrapped_scaled_dot_product, bart_forward, bloom_forward, codegen_wrapped_scaled_dot_product, gpt2_wrapped_scaled_dot_product, gpt_neo_wrapped_scaled_dot_product, opt_forward, t5_forward from .base import BetterTransformerBaseLayer if TYPE_CHECKING: from transformers import PretrainedConfig class GPT2AttentionLayerBetterTransformer(BetterTransformerBaseLayer, GPT2Attention): _attn = gpt2_wrapped_scaled_dot_product def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(config, layer.is_cross_attention, layer.layer_idx) submodules = ['c_proj', 'c_attn', 'attn_dropout', 'resid_dropout', 'bias', 'masked_bias'] for attr in submodules: setattr(self, attr, getattr(layer, attr)) self.module_mapping = None self.original_layers_mapping = {submodule: submodule for submodule in submodules} if layer.is_cross_attention: setattr(self, 'q_attn', getattr(layer, 'q_attn')) self.original_layers_mapping['q_attn'] = 'q_attn' self.downcast_qk = False self.dropout_prob_attn = config.attn_pdrop def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class GPTJAttentionLayerBetterTransformer(BetterTransformerBaseLayer, GPTJAttention, nn.Module): _attn = gpt2_wrapped_scaled_dot_product def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(config) submodules = ['k_proj', 'v_proj', 'q_proj', 'out_proj', 'attn_dropout', 'resid_dropout', 'bias', 'scale_attn', 'masked_bias'] if hasattr(layer, 'embed_positions'): submodules.append('embed_positions') for attr in submodules: setattr(self, attr, getattr(layer, attr)) self.module_mapping = None self.original_layers_mapping = {submodule: submodule for submodule in submodules} self.downcast_qk = True self.dropout_prob_attn = config.attn_pdrop def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class GPTNeoXAttentionLayerBetterTransformer(BetterTransformerBaseLayer, GPTNeoXAttention, nn.Module): _attn = gpt2_wrapped_scaled_dot_product def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(config) self.module_mapping = None submodules = ['rotary_emb', 'query_key_value', 'dense', 'bias', 'masked_bias', 'norm_factor'] for attr in submodules: setattr(self, 
attr, getattr(layer, attr)) self.original_layers_mapping = {submodule: submodule for submodule in submodules} self.downcast_qk = True self.dropout_prob_attn = 0.0 def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class GPTNeoAttentionLayerBetterTransformer(BetterTransformerBaseLayer, GPTNeoSelfAttention, nn.Module): _attn = gpt_neo_wrapped_scaled_dot_product def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) if layer.bias[0][0][-1][0] == 1: self.attention_type = 'global' else: self.attention_type = 'local' with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(config, self.attention_type) self.module_mapping = None submodules = ['attn_dropout', 'resid_dropout', 'k_proj', 'v_proj', 'q_proj', 'out_proj', 'bias', 'masked_bias'] for attr in submodules: setattr(self, attr, getattr(layer, attr)) self.original_layers_mapping = {submodule: submodule for submodule in submodules} self.scale = torch.sqrt(torch.tensor(layer.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()) self.dropout_prob_attn = float(config.attention_dropout) def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class BarkAttentionLayerBetterTransformer(BetterTransformerBaseLayer, BarkSelfAttention, nn.Module): _attn = bark_wrapped_scaled_dot_product def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig', is_causal: bool=False): super().__init__(config) is_causal = layer.is_causal config.dropout = layer.dropout config.hidden_size = layer.embed_dim config.num_heads = layer.num_heads config.bias = layer.out_proj.bias is not None if is_causal: config.block_size = layer.bias.shape[-1] with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(config, is_causal) self.module_mapping = None submodules = ['dropout', 'attn_dropout', 'resid_dropout', 'att_proj', 'out_proj'] for attr in submodules: setattr(self, attr, getattr(layer, attr)) self.original_layers_mapping = {submodule: submodule for submodule in submodules} if is_causal: setattr(self, 'bias', getattr(layer, 'bias')) self.original_layers_mapping['bias'] = 'bias' self.supports_training = False self.dropout_prob_attn = float(config.dropout) def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class BloomAttentionLayerBetterTransformer(BetterTransformerBaseLayer, BloomAttention, nn.Module): def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(config) self.dropout_prob_attn = config.attention_dropout self.module_mapping = None self.layer_idx = getattr(layer, 'layer_idx', None) submodules = ['query_key_value', 'dense', 'attention_dropout'] for attr in submodules: setattr(self, attr, getattr(layer, attr)) self.original_layers_mapping = {submodule: submodule for submodule in submodules} def forward(self, *args, **kwargs): return bloom_forward(self, *args, **kwargs) class CodegenAttentionLayerBetterTransformer(BetterTransformerBaseLayer, CodeGenAttention, nn.Module): _attn = codegen_wrapped_scaled_dot_product def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(config) self.module_mapping = None submodules = ['attn_dropout', 'resid_dropout', 'qkv_proj', 'out_proj', 'causal_mask', 'scale_attn'] if hasattr(layer, 'embed_positions'): submodules.append('embed_positions') for attr in submodules: 
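        # Editorial note (added): like the other wrapper classes in this file, the
        # loop below re-attaches the trained submodules of the original layer to this
        # shell, which was instantiated under `torch.device('meta')` above so that no
        # real parameter memory is allocated before the copy.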
setattr(self, attr, getattr(layer, attr)) self.original_layers_mapping = {submodule: submodule for submodule in submodules} self.dropout_prob_attn = config.attn_pdrop def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class OPTAttentionLayerBetterTransformer(BetterTransformerBaseLayer, OPTAttention, nn.Module): def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(config, layer.is_decoder) self.scale = torch.sqrt(torch.tensor(layer.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()) self.module_mapping = None submodules = ['k_proj', 'v_proj', 'q_proj', 'out_proj'] for attr in submodules: setattr(self, attr, getattr(layer, attr)) self.original_layers_mapping = {submodule: submodule for submodule in submodules} def forward(self, *args, **kwargs): return opt_forward(self, *args, **kwargs) class T5AttentionLayerBetterTransformer(BetterTransformerBaseLayer, T5Attention, torch.nn.Module): def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): if hasattr(config, 'text_config'): config = config.text_config super().__init__(config) with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(config, layer.has_relative_attention_bias) submodules = ['q', 'k', 'v', 'o'] for attr in submodules: setattr(self, attr, getattr(layer, attr)) head_dim = layer.d_model // layer.n_heads self.scale = torch.sqrt(torch.tensor(head_dim, dtype=torch.float32)).to(torch.get_default_dtype()) self.original_layers_mapping = {submodule: submodule for submodule in submodules} if layer.has_relative_attention_bias: setattr(self, 'relative_attention_bias', layer.relative_attention_bias) self.original_layers_mapping['relative_attention_bias'] = 'relative_attention_bias' self.module_mapping = None self.is_decoder = layer.is_decoder def forward(self, *args, **kwargs): return t5_forward(self, *args, **kwargs) def bart_bettertransformer_init(self, layer: 'nn.Module', config: 'PretrainedConfig'): with torch.device('meta'): super(BetterTransformerBaseLayer, self).__init__(layer.embed_dim, layer.num_heads, layer.dropout, layer.is_decoder, layer.k_proj.bias is not None) self.module_mapping = None submodules = ['k_proj', 'v_proj', 'q_proj', 'out_proj'] for attr in submodules: setattr(self, attr, getattr(layer, attr)) self.original_layers_mapping = {submodule: submodule for submodule in submodules} self.is_decoder = layer.is_decoder class BartAttentionLayerBetterTransformer(BetterTransformerBaseLayer, BartAttention, nn.Module): def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) bart_bettertransformer_init(self, layer, config) def forward(self, *args, **kwargs): return bart_forward(self, *args, **kwargs) class BlenderbotAttentionLayerBetterTransformer(BetterTransformerBaseLayer, BlenderbotAttention, nn.Module): def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) bart_bettertransformer_init(self, layer, config) def forward(self, *args, **kwargs): return bart_forward(self, *args, **kwargs) class M2M100AttentionLayerBetterTransformer(BetterTransformerBaseLayer, M2M100Attention, nn.Module): def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'): super().__init__(config) bart_bettertransformer_init(self, layer, config) def forward(self, *args, **kwargs): return bart_forward(self, *args, **kwargs) class MarianAttentionLayerBetterTransformer(BetterTransformerBaseLayer, 
MarianAttention, nn.Module):
    def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'):
        super().__init__(config)
        bart_bettertransformer_init(self, layer, config)

    def forward(self, *args, **kwargs):
        return bart_forward(self, *args, **kwargs)


class PegasusAttentionLayerBetterTransformer(BetterTransformerBaseLayer, PegasusAttention, nn.Module):
    def __init__(self, layer: 'nn.Module', config: 'PretrainedConfig'):
        super().__init__(config)
        bart_bettertransformer_init(self, layer, config)

    def forward(self, *args, **kwargs):
        return bart_forward(self, *args, **kwargs)

# File: optimum-main/optimum/bettertransformer/models/encoder_models.py
from typing import TYPE_CHECKING

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.activations import ACT2FN

from .base import BetterTransformerBaseLayer

if TYPE_CHECKING:
    from transformers import PretrainedConfig


class AlbertLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module):
    def __init__(self, albert_layer, config):
        super().__init__(config)
        super(BetterTransformerBaseLayer, self).__init__()
        self.in_proj_weight = nn.Parameter(torch.cat([albert_layer.attention.query.weight, albert_layer.attention.key.weight, albert_layer.attention.value.weight]))
        self.in_proj_bias = nn.Parameter(torch.cat([albert_layer.attention.query.bias, albert_layer.attention.key.bias, albert_layer.attention.value.bias]))
        self.out_proj_weight = albert_layer.attention.dense.weight
        self.out_proj_bias = albert_layer.attention.dense.bias
        self.linear1_weight = albert_layer.ffn.weight
        self.linear1_bias = albert_layer.ffn.bias
        self.linear2_weight = albert_layer.ffn_output.weight
        self.linear2_bias = albert_layer.ffn_output.bias
        self.norm1_eps = albert_layer.attention.LayerNorm.eps
        self.norm1_weight = albert_layer.attention.LayerNorm.weight
        self.norm1_bias = albert_layer.attention.LayerNorm.bias
        self.norm2_eps = albert_layer.full_layer_layer_norm.eps
        self.norm2_weight = albert_layer.full_layer_layer_norm.weight
        self.norm2_bias = albert_layer.full_layer_layer_norm.bias
        self.num_heads = albert_layer.attention.num_attention_heads
        self.embed_dim = albert_layer.attention.all_head_size
        self.is_last_layer = False
        self.original_layers_mapping = {
            'in_proj_weight': ['attention.query.weight', 'attention.key.weight', 'attention.value.weight'],
            'in_proj_bias': ['attention.query.bias', 'attention.key.bias', 'attention.value.bias'],
            'out_proj_weight': 'attention.dense.weight',
            'out_proj_bias': 'attention.dense.bias',
            'linear1_weight': 'ffn.weight',
            'linear1_bias': 'ffn.bias',
            'linear2_weight': 'ffn_output.weight',
            'linear2_bias': 'ffn_output.bias',
            'norm1_eps': 'attention.LayerNorm.eps',
            'norm1_weight': 'attention.LayerNorm.weight',
            'norm1_bias': 'attention.LayerNorm.bias',
            'norm2_eps': 'full_layer_layer_norm.eps',
            'norm2_weight': 'full_layer_layer_norm.weight',
            'norm2_bias': 'full_layer_layer_norm.bias',
        }
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob
        self.hidden_dropout_prob = config.hidden_dropout_prob
        self.act_fn_callable = ACT2FN[self.act_fn]
        self.validate_bettertransformer()

    def forward(self, hidden_states, attention_mask, *_):
        if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()):
            if hidden_states.is_nested:
                attention_mask = None
            if attention_mask is not None:
                attention_mask = attention_mask.bool()
                attention_mask = torch.reshape(attention_mask, (attention_mask.shape[0], attention_mask.shape[-1]))
                hidden_states = 
torch._nested_tensor_from_mask(hidden_states, ~attention_mask) attention_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask) if hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0) else: qkv = F.linear(hidden_states, weight=self.in_proj_weight, bias=self.in_proj_bias) qkv = qkv.view(qkv.size()[:-1] + (3, self.num_heads, self.attention_head_size)).permute(2, 0, 3, 1, 4) (query, key, value) = (qkv[0], qkv[1], qkv[2]) if self.training: attention_mask = None attention_out = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, is_causal=False, dropout_p=self.attention_probs_dropout_prob if self.training else 0.0) attention_out = attention_out.permute(0, 2, 1, 3).contiguous() new_attention_out_shape = attention_out.size()[:-2] + (self.num_heads * self.attention_head_size,) attention_out = attention_out.view(new_attention_out_shape) attention_out = F.layer_norm(F.dropout(F.linear(attention_out, self.out_proj_weight, self.out_proj_bias), p=self.hidden_dropout_prob, training=self.training) + hidden_states, normalized_shape=self.norm1_weight.shape, weight=self.norm1_weight, bias=self.norm1_bias) hidden_states = self.act_fn_callable(F.linear(attention_out, self.linear1_weight, self.linear1_bias)) hidden_states = F.layer_norm(attention_out + F.dropout(F.linear(hidden_states, self.linear2_weight, self.linear2_bias), p=self.hidden_dropout_prob, training=self.training), normalized_shape=self.norm2_weight.shape, weight=self.norm2_weight, bias=self.norm2_bias) return (hidden_states,) class BertLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, bert_layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.in_proj_weight = nn.Parameter(torch.cat([bert_layer.attention.self.query.weight, bert_layer.attention.self.key.weight, bert_layer.attention.self.value.weight])) self.in_proj_bias = nn.Parameter(torch.cat([bert_layer.attention.self.query.bias, bert_layer.attention.self.key.bias, bert_layer.attention.self.value.bias])) self.out_proj_weight = bert_layer.attention.output.dense.weight self.out_proj_bias = bert_layer.attention.output.dense.bias self.linear1_weight = bert_layer.intermediate.dense.weight self.linear1_bias = bert_layer.intermediate.dense.bias self.linear2_weight = bert_layer.output.dense.weight self.linear2_bias = bert_layer.output.dense.bias self.norm1_eps = bert_layer.attention.output.LayerNorm.eps self.norm1_weight = bert_layer.attention.output.LayerNorm.weight self.norm1_bias = bert_layer.attention.output.LayerNorm.bias self.norm2_eps = bert_layer.output.LayerNorm.eps self.norm2_weight = bert_layer.output.LayerNorm.weight self.norm2_bias = bert_layer.output.LayerNorm.bias self.num_heads = bert_layer.attention.self.num_attention_heads self.embed_dim = bert_layer.attention.self.all_head_size self.is_last_layer = False self.original_layers_mapping = {'in_proj_weight': ['attention.self.query.weight', 'attention.self.key.weight', 'attention.self.value.weight'], 'in_proj_bias': ['attention.self.query.bias', 'attention.self.key.bias', 'attention.self.value.bias'], 'out_proj_weight': 
'attention.output.dense.weight', 'out_proj_bias': 'attention.output.dense.bias', 'linear1_weight': 'intermediate.dense.weight', 'linear1_bias': 'intermediate.dense.bias', 'linear2_weight': 'output.dense.weight', 'linear2_bias': 'output.dense.bias', 'norm1_eps': 'attention.output.LayerNorm.eps', 'norm1_weight': 'attention.output.LayerNorm.weight', 'norm1_bias': 'attention.output.LayerNorm.bias', 'norm2_eps': 'output.LayerNorm.eps', 'norm2_weight': 'output.LayerNorm.weight', 'norm2_bias': 'output.LayerNorm.bias'} self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.hidden_dropout_prob = config.hidden_dropout_prob self.attention_probs_dropout_prob = config.attention_probs_dropout_prob self.act_fn_callable = ACT2FN[self.act_fn] self.validate_bettertransformer() def forward(self, hidden_states, attention_mask, *_): if not self.training and (not torch._C._is_any_autocast_enabled()): if hidden_states.is_nested: attention_mask = None if attention_mask is not None: attention_mask = attention_mask.bool() attention_mask = torch.reshape(attention_mask, (attention_mask.shape[0], attention_mask.shape[-1])) hidden_states = torch._nested_tensor_from_mask(hidden_states, ~attention_mask) attention_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask) if hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0) else: qkv = F.linear(hidden_states, weight=self.in_proj_weight, bias=self.in_proj_bias) qkv = qkv.view(qkv.size()[:-1] + (3, self.num_heads, self.attention_head_size)).permute(2, 0, 3, 1, 4) (query, key, value) = (qkv[0], qkv[1], qkv[2]) if self.training: attention_mask = None attention_out = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, is_causal=False, dropout_p=self.attention_probs_dropout_prob if self.training else 0.0) attention_out = attention_out.permute(0, 2, 1, 3).contiguous() new_attention_out_shape = attention_out.size()[:-2] + (self.num_heads * self.attention_head_size,) attention_out = attention_out.view(new_attention_out_shape) attention_out = F.layer_norm(F.dropout(F.linear(attention_out, self.out_proj_weight, self.out_proj_bias), p=self.hidden_dropout_prob, training=self.training) + hidden_states, normalized_shape=self.norm1_weight.shape, weight=self.norm1_weight, bias=self.norm1_bias) hidden_states = self.act_fn_callable(F.linear(attention_out, self.linear1_weight, self.linear1_bias)) hidden_states = F.layer_norm(attention_out + F.dropout(F.linear(hidden_states, self.linear2_weight, self.linear2_bias), p=self.hidden_dropout_prob, training=self.training), normalized_shape=self.norm2_weight.shape, weight=self.norm2_weight, bias=self.norm2_bias) return (hidden_states,) class BartEncoderLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, bart_layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.in_proj_weight = nn.Parameter(torch.cat([bart_layer.self_attn.q_proj.weight, bart_layer.self_attn.k_proj.weight, bart_layer.self_attn.v_proj.weight])) self.in_proj_bias = nn.Parameter(torch.cat([bart_layer.self_attn.q_proj.bias, bart_layer.self_attn.k_proj.bias, 
bart_layer.self_attn.v_proj.bias])) self.out_proj_weight = bart_layer.self_attn.out_proj.weight self.out_proj_bias = bart_layer.self_attn.out_proj.bias self.linear1_weight = bart_layer.fc1.weight self.linear1_bias = bart_layer.fc1.bias self.linear2_weight = bart_layer.fc2.weight self.linear2_bias = bart_layer.fc2.bias self.norm1_eps = bart_layer.self_attn_layer_norm.eps self.norm1_weight = bart_layer.self_attn_layer_norm.weight self.norm1_bias = bart_layer.self_attn_layer_norm.bias self.norm2_eps = bart_layer.final_layer_norm.eps self.norm2_weight = bart_layer.final_layer_norm.weight self.norm2_bias = bart_layer.final_layer_norm.bias self.num_heads = bart_layer.self_attn.num_heads self.embed_dim = bart_layer.self_attn.embed_dim self.is_last_layer = False self.original_layers_mapping = {'in_proj_weight': ['self_attn.q_proj.weight', 'self_attn.k_proj.weight', 'self_attn.v_proj.weight'], 'in_proj_bias': ['self_attn.q_proj.bias', 'self_attn.k_proj.bias', 'self_attn.v_proj.bias'], 'out_proj_weight': 'self_attn.out_proj.weight', 'out_proj_bias': 'self_attn.out_proj.bias', 'linear1_weight': 'fc1.weight', 'linear1_bias': 'fc1.bias', 'linear2_weight': 'fc2.weight', 'linear2_bias': 'fc2.bias', 'norm1_eps': 'self_attn_layer_norm.eps', 'norm1_weight': 'self_attn_layer_norm.weight', 'norm1_bias': 'self_attn_layer_norm.bias', 'norm2_eps': 'final_layer_norm.eps', 'norm2_weight': 'final_layer_norm.weight', 'norm2_bias': 'final_layer_norm.bias'} self.dropout = config.attention_dropout self.activation_dropout = config.activation_dropout self.attention_head_size = config.d_model // config.encoder_attention_heads self.act_fn_callable = ACT2FN[self.act_fn] self.validate_bettertransformer() def forward(self, hidden_states, attention_mask, output_attentions: bool, position_bias=None, *_, **__): if output_attentions: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()): if not hasattr(hidden_states, 'original_shape'): original_shape = hidden_states.shape else: original_shape = hidden_states.original_shape if hidden_states.is_nested: attention_mask = None if attention_mask is not None: if len(attention_mask.shape) == 4: attention_mask = attention_mask.squeeze(1)[:, 0] attention_mask = attention_mask.bool() attention_mask = torch.reshape(attention_mask, (attention_mask.shape[0], attention_mask.shape[-1])) hidden_states = torch._nested_tensor_from_mask(hidden_states, ~attention_mask) attention_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask) if not self.is_last_layer: hidden_states.original_shape = original_shape elif hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0, original_shape) else: qkv = F.linear(hidden_states, weight=self.in_proj_weight, bias=self.in_proj_bias) qkv = qkv.view(qkv.size()[:-1] + (3, self.num_heads, self.attention_head_size)).permute(2, 0, 3, 1, 4) (query, key, value) = (qkv[0], qkv[1], qkv[2]) if self.training: attention_mask = None attention_out = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, is_causal=False, 
dropout_p=self.dropout if self.training else 0.0) attention_out = attention_out.permute(0, 2, 1, 3).contiguous() new_attention_out_shape = attention_out.size()[:-2] + (self.num_heads * self.attention_head_size,) attention_out = attention_out.view(new_attention_out_shape) attention_out = F.layer_norm(F.dropout(F.linear(attention_out, self.out_proj_weight, self.out_proj_bias), p=self.dropout, training=self.training) + hidden_states, normalized_shape=self.norm1_weight.shape, weight=self.norm1_weight, bias=self.norm1_bias) hidden_states = F.dropout(self.act_fn_callable(F.linear(attention_out, self.linear1_weight, self.linear1_bias)), p=self.activation_dropout, training=self.training) hidden_states = F.layer_norm(attention_out + F.dropout(F.linear(hidden_states, self.linear2_weight, self.linear2_bias), p=self.dropout, training=self.training), normalized_shape=self.norm2_weight.shape, weight=self.norm2_weight, bias=self.norm2_bias) return (hidden_states,) class MBartEncoderLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, mbart_layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.in_proj_weight = nn.Parameter(torch.cat([mbart_layer.self_attn.q_proj.weight, mbart_layer.self_attn.k_proj.weight, mbart_layer.self_attn.v_proj.weight])) self.in_proj_bias = nn.Parameter(torch.cat([mbart_layer.self_attn.q_proj.bias, mbart_layer.self_attn.k_proj.bias, mbart_layer.self_attn.v_proj.bias])) self.out_proj_weight = mbart_layer.self_attn.out_proj.weight self.out_proj_bias = mbart_layer.self_attn.out_proj.bias self.linear1_weight = mbart_layer.fc1.weight self.linear1_bias = mbart_layer.fc1.bias self.linear2_weight = mbart_layer.fc2.weight self.linear2_bias = mbart_layer.fc2.bias self.norm1_eps = mbart_layer.self_attn_layer_norm.eps self.norm1_weight = mbart_layer.self_attn_layer_norm.weight self.norm1_bias = mbart_layer.self_attn_layer_norm.bias self.norm2_eps = mbart_layer.final_layer_norm.eps self.norm2_weight = mbart_layer.final_layer_norm.weight self.norm2_bias = mbart_layer.final_layer_norm.bias self.num_heads = mbart_layer.self_attn.num_heads self.embed_dim = mbart_layer.self_attn.embed_dim self.is_last_layer = False self.norm_first = True self.original_layers_mapping = {'in_proj_weight': ['self_attn.q_proj.weight', 'self_attn.k_proj.weight', 'self_attn.v_proj.weight'], 'in_proj_bias': ['self_attn.q_proj.bias', 'self_attn.k_proj.bias', 'self_attn.v_proj.bias'], 'out_proj_weight': 'self_attn.out_proj.weight', 'out_proj_bias': 'self_attn.out_proj.bias', 'linear1_weight': 'fc1.weight', 'linear1_bias': 'fc1.bias', 'linear2_weight': 'fc2.weight', 'linear2_bias': 'fc2.bias', 'norm1_weight': 'self_attn_layer_norm.weight', 'norm1_bias': 'self_attn_layer_norm.bias', 'norm1_eps': 'self_attn_layer_norm.eps', 'norm2_weight': 'final_layer_norm.weight', 'norm2_bias': 'final_layer_norm.bias', 'norm2_eps': 'final_layer_norm.eps'} self.dropout = config.attention_dropout self.activation_dropout = config.activation_dropout self.attention_head_size = config.d_model // config.encoder_attention_heads self.act_fn_callable = ACT2FN[self.act_fn] self.validate_bettertransformer() def forward(self, hidden_states, attention_mask, output_attentions: bool, position_bias=None, *_, **__): if output_attentions: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()): if not hasattr(hidden_states, 'original_shape'): original_shape = 
hidden_states.shape else: original_shape = hidden_states.original_shape if hidden_states.is_nested: attention_mask = None if attention_mask is not None: if len(attention_mask.shape) == 4: attention_mask = attention_mask.squeeze(1)[:, 0] attention_mask = attention_mask.bool() attention_mask = torch.reshape(attention_mask, (attention_mask.shape[0], attention_mask.shape[-1])) hidden_states = torch._nested_tensor_from_mask(hidden_states, ~attention_mask) attention_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask) if not self.is_last_layer: hidden_states.original_shape = original_shape elif hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0, original_shape) else: residual = hidden_states hidden_states = F.layer_norm(hidden_states, normalized_shape=self.norm1_weight.shape, weight=self.norm1_weight, bias=self.norm1_bias) qkv = F.linear(hidden_states, weight=self.in_proj_weight, bias=self.in_proj_bias) qkv = qkv.view(qkv.size()[:-1] + (3, self.num_heads, self.attention_head_size)).permute(2, 0, 3, 1, 4) (query, key, value) = (qkv[0], qkv[1], qkv[2]) if self.training: attention_mask = None attention_out = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, is_causal=False, dropout_p=self.dropout if self.training else 0.0) attention_out = attention_out.permute(0, 2, 1, 3).contiguous() new_attention_out_shape = attention_out.size()[:-2] + (self.num_heads * self.attention_head_size,) attention_out = attention_out.view(new_attention_out_shape) hidden_states = residual + F.dropout(F.linear(attention_out, self.out_proj_weight, self.out_proj_bias), p=self.dropout, training=self.training) residual = hidden_states hidden_states = F.layer_norm(hidden_states, normalized_shape=self.norm2_weight.shape, weight=self.norm2_weight, bias=self.norm2_bias) hidden_states = F.dropout(self.act_fn_callable(F.linear(hidden_states, self.linear1_weight, self.linear1_bias)), p=self.activation_dropout, training=self.training) hidden_states = residual + F.dropout(F.linear(hidden_states, self.linear2_weight, self.linear2_bias), p=self.dropout, training=self.training) return (hidden_states,) class DistilBertLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, bert_layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.in_proj_weight = nn.Parameter(torch.cat([bert_layer.attention.q_lin.weight, bert_layer.attention.k_lin.weight, bert_layer.attention.v_lin.weight])) self.in_proj_bias = nn.Parameter(torch.cat([bert_layer.attention.q_lin.bias, bert_layer.attention.k_lin.bias, bert_layer.attention.v_lin.bias])) self.out_proj_weight = bert_layer.attention.out_lin.weight self.out_proj_bias = bert_layer.attention.out_lin.bias self.linear1_weight = bert_layer.ffn.lin1.weight self.linear1_bias = bert_layer.ffn.lin1.bias self.linear2_weight = bert_layer.ffn.lin2.weight self.linear2_bias = bert_layer.ffn.lin2.bias self.norm1_eps = bert_layer.sa_layer_norm.eps self.norm1_weight = bert_layer.sa_layer_norm.weight self.norm1_bias = bert_layer.sa_layer_norm.bias self.norm2_eps = bert_layer.output_layer_norm.eps self.norm2_weight = 
bert_layer.output_layer_norm.weight self.norm2_bias = bert_layer.output_layer_norm.bias self.num_heads = bert_layer.attention.n_heads self.embed_dim = bert_layer.attention.dim self.is_last_layer = False self.original_layers_mapping = {'in_proj_weight': ['attention.q_lin.weight', 'attention.k_lin.weight', 'attention.v_lin.weight'], 'in_proj_bias': ['attention.q_lin.bias', 'attention.k_lin.bias', 'attention.v_lin.bias'], 'out_proj_weight': 'attention.out_lin.weight', 'out_proj_bias': 'attention.out_lin.bias', 'linear1_weight': 'ffn.lin1.weight', 'linear1_bias': 'ffn.lin1.bias', 'linear2_weight': 'ffn.lin2.weight', 'linear2_bias': 'ffn.lin2.bias', 'norm1_weight': 'sa_layer_norm.weight', 'norm1_bias': 'sa_layer_norm.bias', 'norm2_weight': 'output_layer_norm.weight', 'norm2_bias': 'output_layer_norm.bias'} self.attention_dropout = config.attention_dropout self.dropout = config.dropout self.attention_head_size = config.dim // config.n_heads self.act_fn_callable = ACT2FN[self.act_fn] self.validate_bettertransformer() def forward(self, hidden_states, attn_mask, output_attentions: bool, head_mask=None, *_): if output_attentions: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()): if hidden_states.is_nested: attn_mask = None if attn_mask is not None: attn_mask = attn_mask.bool() attn_mask = torch.reshape(attn_mask, (attn_mask.shape[0], attn_mask.shape[-1])) seqlen = attn_mask.shape[1] lengths = torch.sum(~attn_mask, 1) if not all((l == seqlen for l in lengths)): hidden_states = torch._nested_tensor_from_mask(hidden_states, attn_mask) attn_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attn_mask) if hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0) else: qkv = F.linear(hidden_states, weight=self.in_proj_weight, bias=self.in_proj_bias) qkv = qkv.view(qkv.size()[:-1] + (3, self.num_heads, self.attention_head_size)).permute(2, 0, 3, 1, 4) (query, key, value) = (qkv[0], qkv[1], qkv[2]) attn_mask = attn_mask.unsqueeze(1).unsqueeze(2).to(dtype=query.dtype) attn_mask = (1.0 - attn_mask) * torch.finfo(query.dtype).min if self.training: attn_mask = None attention_out = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, is_causal=False, dropout_p=self.attention_dropout if self.training else 0.0) attention_out = attention_out.permute(0, 2, 1, 3).contiguous() new_attention_out_shape = attention_out.size()[:-2] + (self.num_heads * self.attention_head_size,) attention_out = attention_out.view(new_attention_out_shape) attention_out = F.layer_norm(F.dropout(F.linear(attention_out, self.out_proj_weight, self.out_proj_bias), p=self.dropout, training=self.training) + hidden_states, normalized_shape=self.norm1_weight.shape, weight=self.norm1_weight, bias=self.norm1_bias) hidden_states = self.act_fn_callable(F.linear(attention_out, self.linear1_weight, self.linear1_bias)) hidden_states = F.layer_norm(attention_out + F.dropout(F.linear(hidden_states, self.linear2_weight, self.linear2_bias), p=self.dropout, training=self.training), normalized_shape=self.norm2_weight.shape, 
weight=self.norm2_weight, bias=self.norm2_bias) return (hidden_states,) class ViTLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, vit_layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.in_proj_weight = nn.Parameter(torch.cat([vit_layer.attention.attention.query.weight, vit_layer.attention.attention.key.weight, vit_layer.attention.attention.value.weight])) self.in_proj_bias = nn.Parameter(torch.cat([vit_layer.attention.attention.query.bias, vit_layer.attention.attention.key.bias, vit_layer.attention.attention.value.bias])) self.out_proj_weight = vit_layer.attention.output.dense.weight self.out_proj_bias = vit_layer.attention.output.dense.bias self.linear1_weight = vit_layer.intermediate.dense.weight self.linear1_bias = vit_layer.intermediate.dense.bias self.linear2_weight = vit_layer.output.dense.weight self.linear2_bias = vit_layer.output.dense.bias self.norm1_eps = vit_layer.layernorm_before.eps self.norm1_weight = vit_layer.layernorm_before.weight self.norm1_bias = vit_layer.layernorm_before.bias self.norm2_eps = vit_layer.layernorm_after.eps self.norm2_weight = vit_layer.layernorm_after.weight self.norm2_bias = vit_layer.layernorm_after.bias self.num_heads = vit_layer.attention.attention.num_attention_heads self.embed_dim = int(vit_layer.attention.attention.attention_head_size * self.num_heads) self.is_last_layer = False self.norm_first = True self.original_layers_mapping = {'in_proj_weight': ['attention.attention.query.weight', 'attention.attention.key.weight', 'attention.attention.value.weight'], 'in_proj_bias': ['attention.attention.query.bias', 'attention.attention.key.bias', 'attention.attention.value.bias'], 'out_proj_weight': 'attention.output.dense.weight', 'out_proj_bias': 'attention.output.dense.bias', 'linear1_weight': 'intermediate.dense.weight', 'linear1_bias': 'intermediate.dense.bias', 'linear2_weight': 'output.dense.weight', 'linear2_bias': 'output.dense.bias', 'norm1_weight': 'layernorm_before.weight', 'norm1_bias': 'layernorm_before.bias', 'norm2_weight': 'layernorm_after.weight', 'norm2_bias': 'layernorm_after.bias'} self.validate_bettertransformer() def forward(self, hidden_states, output_attentions: bool, *_, **__): if output_attentions: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()): attention_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask) if hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0) else: raise NotImplementedError('Training and Autocast are not implemented for BetterTransformer + ViT. 
Please open an issue.') return (hidden_states,) class ViltLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, vilt_layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.in_proj_weight = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.weight, vilt_layer.attention.attention.key.weight, vilt_layer.attention.attention.value.weight])) self.in_proj_bias = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.bias, vilt_layer.attention.attention.key.bias, vilt_layer.attention.attention.value.bias])) self.out_proj_weight = vilt_layer.attention.output.dense.weight self.out_proj_bias = vilt_layer.attention.output.dense.bias self.linear1_weight = vilt_layer.intermediate.dense.weight self.linear1_bias = vilt_layer.intermediate.dense.bias self.linear2_weight = vilt_layer.output.dense.weight self.linear2_bias = vilt_layer.output.dense.bias self.norm1_eps = vilt_layer.layernorm_before.eps self.norm1_weight = vilt_layer.layernorm_before.weight self.norm1_bias = vilt_layer.layernorm_before.bias self.norm2_eps = vilt_layer.layernorm_after.eps self.norm2_weight = vilt_layer.layernorm_after.weight self.norm2_bias = vilt_layer.layernorm_after.bias self.num_heads = vilt_layer.attention.attention.num_attention_heads self.embed_dim = int(vilt_layer.attention.attention.attention_head_size * self.num_heads) self.is_last_layer = False self.norm_first = True self.original_layers_mapping = {'in_proj_weight': ['attention.attention.query.weight', 'attention.attention.key.weight', 'attention.attention.value.weight'], 'in_proj_bias': ['attention.attention.query.bias', 'attention.attention.key.bias', 'attention.attention.value.bias'], 'out_proj_weight': 'attention.output.dense.weight', 'out_proj_bias': 'attention.output.dense.bias', 'linear1_weight': 'intermediate.dense.weight', 'linear1_bias': 'intermediate.dense.bias', 'linear2_weight': 'output.dense.weight', 'linear2_bias': 'output.dense.bias', 'norm1_weight': 'layernorm_before.weight', 'norm1_bias': 'layernorm_before.bias', 'norm2_weight': 'layernorm_after.weight', 'norm2_bias': 'layernorm_after.bias'} self.validate_bettertransformer() def forward(self, hidden_states, layer_head_mask, output_attentions: bool, *_, **__): if output_attentions: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()): attention_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask) if hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0) else: raise NotImplementedError('Training and Autocast are not implemented for BetterTransformer + Vilt. 
Please open an issue.') return (hidden_states,) class Wav2Vec2EncoderLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, wav2vec2_layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.in_proj_weight = nn.Parameter(torch.cat([wav2vec2_layer.attention.q_proj.weight, wav2vec2_layer.attention.k_proj.weight, wav2vec2_layer.attention.v_proj.weight])) self.in_proj_bias = nn.Parameter(torch.cat([wav2vec2_layer.attention.q_proj.bias, wav2vec2_layer.attention.k_proj.bias, wav2vec2_layer.attention.v_proj.bias])) self.out_proj_weight = wav2vec2_layer.attention.out_proj.weight self.out_proj_bias = wav2vec2_layer.attention.out_proj.bias self.linear1_weight = wav2vec2_layer.feed_forward.intermediate_dense.weight self.linear1_bias = wav2vec2_layer.feed_forward.intermediate_dense.bias self.linear2_weight = wav2vec2_layer.feed_forward.output_dense.weight self.linear2_bias = wav2vec2_layer.feed_forward.output_dense.bias self.norm1_eps = wav2vec2_layer.layer_norm.eps self.norm1_weight = wav2vec2_layer.layer_norm.weight self.norm1_bias = wav2vec2_layer.layer_norm.bias self.norm2_eps = wav2vec2_layer.final_layer_norm.eps self.norm2_weight = wav2vec2_layer.final_layer_norm.weight self.norm2_bias = wav2vec2_layer.final_layer_norm.bias self.num_heads = wav2vec2_layer.attention.num_heads self.embed_dim = wav2vec2_layer.attention.embed_dim self.is_last_layer = False self.original_layers_mapping = {'in_proj_weight': ['attention.q_proj.weight', 'attention.k_proj.weight', 'attention.v_proj.weight'], 'in_proj_bias': ['attention.q_proj.bias', 'attention.k_proj.bias', 'attention.v_proj.bias'], 'out_proj_weight': 'attention.out_proj.weight', 'out_proj_bias': 'attention.out_proj.bias', 'linear1_weight': 'feed_forward.intermediate_dense.weight', 'linear1_bias': 'feed_forward.intermediate_dense.bias', 'linear2_weight': 'feed_forward.output_dense.weight', 'linear2_bias': 'feed_forward.output_dense.bias', 'norm1_weight': 'layer_norm.weight', 'norm1_bias': 'layer_norm.bias', 'norm1_eps': 'layer_norm.eps', 'norm2_weight': 'final_layer_norm.weight', 'norm2_bias': 'final_layer_norm.bias', 'norm2_eps': 'final_layer_norm.eps'} if config.do_stable_layer_norm: self.norm_first = True self.validate_bettertransformer() def forward(self, hidden_states, attention_mask, output_attentions: bool, **__): if output_attentions: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()): if hidden_states.is_nested: attention_mask = None if attention_mask is not None: attention_mask = attention_mask.bool() if len(attention_mask.shape) == 4: attention_mask = attention_mask.squeeze(1)[:, 0] attention_mask = torch.reshape(attention_mask, (attention_mask.shape[0], attention_mask.shape[-1])) hidden_states = torch._nested_tensor_from_mask(hidden_states, ~attention_mask) attention_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask) if hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0) else: raise NotImplementedError('Training and Autocast are not 
implemented for BetterTransformer + Wav2Vec2. Please open an issue.') return (hidden_states,) class FSMTEncoderLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, fsmt_layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.in_proj_weight = nn.Parameter(torch.cat([fsmt_layer.self_attn.q_proj.weight, fsmt_layer.self_attn.k_proj.weight, fsmt_layer.self_attn.v_proj.weight])) self.in_proj_bias = nn.Parameter(torch.cat([fsmt_layer.self_attn.q_proj.bias, fsmt_layer.self_attn.k_proj.bias, fsmt_layer.self_attn.v_proj.bias])) self.out_proj_weight = fsmt_layer.self_attn.out_proj.weight self.out_proj_bias = fsmt_layer.self_attn.out_proj.bias self.linear1_weight = fsmt_layer.fc1.weight self.linear1_bias = fsmt_layer.fc1.bias self.linear2_weight = fsmt_layer.fc2.weight self.linear2_bias = fsmt_layer.fc2.bias self.norm1_eps = fsmt_layer.self_attn_layer_norm.eps self.norm1_weight = fsmt_layer.self_attn_layer_norm.weight self.norm1_bias = fsmt_layer.self_attn_layer_norm.bias self.norm2_eps = fsmt_layer.final_layer_norm.eps self.norm2_weight = fsmt_layer.final_layer_norm.weight self.norm2_bias = fsmt_layer.final_layer_norm.bias self.num_heads = fsmt_layer.self_attn.num_heads self.embed_dim = fsmt_layer.self_attn.embed_dim self.is_last_layer = False self.original_layers_mapping = {'in_proj_weight': ['self_attn.q_proj.weight', 'self_attn.k_proj.weight', 'self_attn.v_proj.weight'], 'in_proj_bias': ['self_attn.q_proj.bias', 'self_attn.k_proj.bias', 'self_attn.v_proj.bias'], 'out_proj_weight': 'self_attn.out_proj.weight', 'out_proj_bias': 'self_attn.out_proj.bias', 'linear1_weight': 'fc1.weight', 'linear1_bias': 'fc1.bias', 'linear2_weight': 'fc2.weight', 'linear2_bias': 'fc2.bias', 'norm1_weight': 'self_attn_layer_norm.weight', 'norm1_bias': 'self_attn_layer_norm.bias', 'norm2_weight': 'final_layer_norm.weight', 'norm2_bias': 'final_layer_norm.bias'} self.validate_bettertransformer() def forward(self, hidden_states, attention_mask, output_attentions: bool, position_bias=None, *_, **__): if output_attentions: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()): if not hasattr(hidden_states, 'original_shape'): original_shape = hidden_states.shape else: original_shape = hidden_states.original_shape if hidden_states.is_nested: attention_mask = None if attention_mask is not None: attention_mask = attention_mask.bool() attention_mask = torch.reshape(attention_mask, (attention_mask.shape[0], attention_mask.shape[-1])) if hidden_states.shape[0] != attention_mask.shape[0]: hidden_states = hidden_states.transpose(1, 0) original_shape = hidden_states.shape hidden_states = torch._nested_tensor_from_mask(hidden_states, ~attention_mask) attention_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask) if not self.is_last_layer: hidden_states.original_shape = original_shape elif hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0, original_shape) else: raise NotImplementedError('Training and Autocast are not implemented for 
BetterTransformer + FSMT. Please open an issue.') return (hidden_states, attention_mask) class ProphetNetEncoderLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, prophetnet_layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.config = config self.in_proj_weight = nn.Parameter(torch.cat([prophetnet_layer.self_attn.query_proj.weight, prophetnet_layer.self_attn.key_proj.weight, prophetnet_layer.self_attn.value_proj.weight])) self.in_proj_bias = nn.Parameter(torch.cat([prophetnet_layer.self_attn.query_proj.bias, prophetnet_layer.self_attn.key_proj.bias, prophetnet_layer.self_attn.value_proj.bias])) self.out_proj_weight = prophetnet_layer.self_attn.out_proj.weight self.out_proj_bias = prophetnet_layer.self_attn.out_proj.bias self.linear1_weight = prophetnet_layer.feed_forward.intermediate.weight self.linear1_bias = prophetnet_layer.feed_forward.intermediate.bias self.linear2_weight = prophetnet_layer.feed_forward.output.weight self.linear2_bias = prophetnet_layer.feed_forward.output.bias self.norm1_eps = prophetnet_layer.self_attn_layer_norm.eps self.norm1_weight = prophetnet_layer.self_attn_layer_norm.weight self.norm1_bias = prophetnet_layer.self_attn_layer_norm.bias self.norm2_eps = prophetnet_layer.feed_forward_layer_norm.eps self.norm2_weight = prophetnet_layer.feed_forward_layer_norm.weight self.norm2_bias = prophetnet_layer.feed_forward_layer_norm.bias self.num_heads = prophetnet_layer.self_attn.num_attn_heads self.embed_dim = prophetnet_layer.self_attn.head_dim * self.num_heads self.is_last_layer = False self.original_layers_mapping = {'in_proj_weight': ['self_attn.query_proj.weight', 'self_attn.key_proj.weight', 'self_attn.value_proj.weight'], 'in_proj_bias': ['self_attn.query_proj.bias', 'self_attn.key_proj.bias', 'self_attn.value_proj.bias'], 'out_proj_weight': 'self_attn.out_proj.weight', 'out_proj_bias': 'self_attn.out_proj.bias', 'linear1_weight': 'feed_forward.intermediate.weight', 'linear1_bias': 'feed_forward.intermediate.bias', 'linear2_weight': 'feed_forward.output.weight', 'linear2_bias': 'feed_forward.output.bias', 'norm1_weight': 'self_attn_layer_norm.weight', 'norm1_bias': 'self_attn_layer_norm.bias', 'norm2_weight': 'feed_forward_layer_norm.weight', 'norm2_bias': 'feed_forward_layer_norm.bias'} self.validate_bettertransformer() def forward(self, hidden_states, attention_mask, output_attentions: bool, *_, **__): if output_attentions: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()): if not hasattr(hidden_states, 'original_shape'): original_shape = hidden_states.shape else: original_shape = hidden_states.original_shape if hidden_states.is_nested: attention_mask = None if attention_mask is not None: attention_mask = attention_mask.squeeze(1)[:, 0] attention_mask = attention_mask.bool() attention_mask = torch.reshape(attention_mask, (attention_mask.shape[0], attention_mask.shape[-1])) hidden_states = torch._nested_tensor_from_mask(hidden_states, ~attention_mask) attention_mask = None hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, 
attention_mask) if not self.is_last_layer: hidden_states.original_shape = original_shape elif hidden_states.is_nested and self.is_last_layer: hidden_states = hidden_states.to_padded_tensor(0.0, original_shape) else: raise ValueError('Training and Autocast are not implemented for BetterTransformer + ProphetNet. Please open an issue.') return (hidden_states,) class CLIPLayerBetterTransformer(BetterTransformerBaseLayer, nn.Module): def __init__(self, layer, config): super().__init__(config) super(BetterTransformerBaseLayer, self).__init__() self.in_proj_weight = nn.Parameter(torch.cat([layer.self_attn.q_proj.weight, layer.self_attn.k_proj.weight, layer.self_attn.v_proj.weight])) self.in_proj_bias = nn.Parameter(torch.cat([layer.self_attn.q_proj.bias, layer.self_attn.k_proj.bias, layer.self_attn.v_proj.bias])) self.out_proj_weight = layer.self_attn.out_proj.weight self.out_proj_bias = layer.self_attn.out_proj.bias self.linear1_weight = layer.mlp.fc1.weight self.linear1_bias = layer.mlp.fc1.bias self.linear2_weight = layer.mlp.fc2.weight self.linear2_bias = layer.mlp.fc2.bias self.norm1_eps = layer.layer_norm1.eps self.norm1_weight = layer.layer_norm1.weight self.norm1_bias = layer.layer_norm1.bias self.norm2_eps = layer.layer_norm2.eps self.norm2_weight = layer.layer_norm2.weight self.norm2_bias = layer.layer_norm2.bias self.num_heads = layer.self_attn.num_heads self.embed_dim = layer.self_attn.embed_dim self.is_last_layer = False self.norm_first = True self.original_layers_mapping = {'in_proj_weight': ['self_attn.q_proj.weight', 'self_attn.k_proj.weight', 'self_attn.v_proj.weight'], 'in_proj_bias': ['self_attn.q_proj.bias', 'self_attn.k_proj.bias', 'self_attn.v_proj.bias'], 'out_proj_weight': 'self_attn.out_proj.weight', 'out_proj_bias': 'self_attn.out_proj.bias', 'linear1_weight': 'mlp.fc1.weight', 'linear1_bias': 'mlp.fc1.bias', 'linear2_weight': 'mlp.fc2.weight', 'linear2_bias': 'mlp.fc2.bias', 'norm1_eps': 'layer_norm1.eps', 'norm1_weight': 'layer_norm1.weight', 'norm1_bias': 'layer_norm1.bias', 'norm2_eps': 'layer_norm2.eps', 'norm2_weight': 'layer_norm2.weight', 'norm2_bias': 'layer_norm2.bias'} self.validate_bettertransformer() def forward(self, hidden_states, attention_mask, causal_attention_mask, output_attentions: bool, *_, **__): if output_attentions: raise ValueError('output_attentions=True can not be supported with BetterTransformer.') if not self.training and (not torch.is_autocast_enabled()) and (not torch.is_autocast_cpu_enabled()): if attention_mask is not None or causal_attention_mask is not None: raise ValueError('Please do not use attention masks when using `BetterTransformer` converted vision models') hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask) else: raise NotImplementedError('Training and Autocast are not implemented for BetterTransformer + CLIP. 
Please open an issue.') return (hidden_states,) def _get_activation_function(self, config: 'PretrainedConfig'): if hasattr(config, 'vision_config') and hasattr(config, 'text_config'): assert config.vision_config.hidden_act == config.text_config.hidden_act return config.vision_config.hidden_act else: return config.hidden_act # File: optimum-main/optimum/bettertransformer/transformation.py import logging import os import types from copy import deepcopy from typing import TYPE_CHECKING, Dict, Optional, Union import torch from packaging.version import parse from ..utils import check_if_pytorch_greater, is_accelerate_available, recurse_getattr, recurse_setattr from .models import BetterTransformerManager if TYPE_CHECKING: from transformers import PreTrainedModel logger = logging.getLogger(__name__) if is_accelerate_available(): from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import remove_hook_from_module ERROR_MESSAGE = 'The Better Transformers implementation for the model {model_name} has not been implemented yet. Please open an issue requesting the addition of this model with its `BetterTransformer` implementation.' def raise_save_or_push_incompatible(*_, **__): raise ValueError('You are trying to save or push a model that has been converted with `BetterTransformer`.', ' Please revert the model to its original state before calling `save_pretrained` or `push_to_hub`.', ' By calling model = BetterTransformer.reverse(model) before saving or pushing.') def replace_to_bettertransformer(model, config): for (name, module) in model.named_children(): if hasattr(module, 'SCB'): raise ValueError('`load_in_8bit` and `BetterTransformers` are mutually exclusive', ' please pass a model that is not loaded in 8-bit.') target_classes = list(BetterTransformerManager.MODEL_MAPPING[config.model_type].keys()) if config.model_type in BetterTransformerManager.OVERWRITE_METHODS: for (class_name, method_name_and_replacement) in BetterTransformerManager.OVERWRITE_METHODS[config.model_type].items(): if module.__class__.__name__ == class_name: method_name = method_name_and_replacement[0] new_method = method_name_and_replacement[1] setattr(module, method_name, types.MethodType(new_method, module)) should_replace_module = False for target_class in target_classes: should_replace_module = module.__class__.__name__ == target_class if should_replace_module: bettertransformer_module = BetterTransformerManager.MODEL_MAPPING[config.model_type][target_class](module, config) model._modules[name] = bettertransformer_module break if len(list(module.children())) > 0 and should_replace_module is False: if config.model_type not in BetterTransformerManager.EXCLUDE_FROM_TRANSFORM or (config.model_type in BetterTransformerManager.EXCLUDE_FROM_TRANSFORM and name not in BetterTransformerManager.EXCLUDE_FROM_TRANSFORM[config.model_type]): replace_to_bettertransformer(module, config) return model def set_last_layer(model: torch.nn.Module): dict_named_module = dict(model.named_modules()) sort_fn = lambda list_modules: [module.__class__.__name__ for module in list_modules] modulelist_lengths = [] for key in dict_named_module.keys(): if isinstance(dict_named_module[key], torch.nn.ModuleList) and 'encoder' in key and (model.config.model_type not in BetterTransformerManager.EXCLUDE_FROM_TRANSFORM or (model.config.model_type in BetterTransformerManager.EXCLUDE_FROM_TRANSFORM and all((name not in key for name in BetterTransformerManager.EXCLUDE_FROM_TRANSFORM[model.config.model_type])))): 
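# Note: the condition above selects encoder nn.ModuleList containers that are not excluded from the
# BetterTransformer transform; their lengths are collected below so that the largest list (assumed to
# hold the encoder layers) can have its final `*LayerBetterTransformer` module flagged with
# `is_last_layer=True`, which is what makes that layer convert nested tensors back to padded tensors on output.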
modulelist_lengths.append((len(dict_named_module[key]), key)) if len(modulelist_lengths) > 1: (_, key) = max(modulelist_lengths, key=lambda item: item[0]) largest_module_list = dict_named_module[key] for module in largest_module_list[-1].modules(): if 'LayerBetterTransformer' in module.__class__.__name__: setattr(module, 'is_last_layer', True) return else: for key in dict_named_module.keys(): if isinstance(dict_named_module[key], torch.nn.ModuleList) and all(('LayerBetterTransformer' in module_name for module_name in sort_fn(dict_named_module[key]))): setattr(dict_named_module[key][-1], 'is_last_layer', True) return raise Exception(f'The transformation of the model {model.__class__.__name__} to BetterTransformer failed while it should not. Please fill a bug report or open a PR to support this model at https://github.com/huggingface/optimum/') class BetterTransformer(object): @check_if_pytorch_greater('1.13.99', 'Please upgrade PyTorch following https://pytorch.org/get-started/locally/ in order to use BetterTransformer.') def transform(model: torch.nn.Module, keep_original_model: bool=False, max_memory: Optional[Dict]=None, offload_dir: Optional[Union[str, os.PathLike]]=None, **kwargs) -> torch.nn.Module: hf_config = model.config if hf_config.model_type in ['falcon', 'gpt_bigcode', 'llama', 'whisper']: raise ValueError(f'Transformers now supports natively BetterTransformer optimizations (torch.nn.functional.scaled_dot_product_attention) for the model type {hf_config.model_type}. As such, there is no need to use `model.to_bettertransformers()` or `BetterTransformer.transform(model)` from the Optimum library. Please upgrade to transformers>=4.36 and torch>=2.1.1 to use it. Details: https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-and-memory-efficient-attention-through-pytorchs-scaleddotproductattention.') if hasattr(model, 'hf_device_map'): load_accelerate = True hf_device_map = model.hf_device_map else: load_accelerate = False if hasattr(model, 'use_bettertransformer') and model.use_bettertransformer is True: raise Exception('`BetterTransform.transform()` was called on a model already using Better Transformer modeling.') if BetterTransformerManager.cannot_support(model.config.model_type): raise ValueError(f'The model type {model.config.model_type} can not be supported to be used with BetterTransformer. The identified reason is: {BetterTransformerManager.CAN_NOT_BE_SUPPORTED[model.config.model_type]}. Currently supported models are: {BetterTransformerManager.MODEL_MAPPING.keys()}.') if not BetterTransformerManager.supports(model.config.model_type): raise NotImplementedError(f'The model type {model.config.model_type} is not yet supported to be used with BetterTransformer. Feel free to open an issue at https://github.com/huggingface/optimum/issues if you would like this model type to be supported. Currently supported models are: {BetterTransformerManager.MODEL_MAPPING.keys()}.') if parse(torch.__version__) <= parse('1.14'): raise ValueError(f'BetterTransformer requires torch>=2.0 but {torch.__version__} is installed. 
Please upgrade PyTorch.') if load_accelerate: remove_hook_from_module(model, recurse=True) training_mode = model.training if keep_original_model: try: if not check_if_pytorch_greater(2.0, 'Please upgrade PyTorch to >=2.0 to use training mode'): model = model.requires_grad_(False) model_fast = deepcopy(model) except RuntimeError: raise ValueError(f'The model {model.__class__.__name__} does not support `deepcopy` operation that is internally used to create a copy of the original model when using `keep_original_model=True`. Please run the conversion with `keep_original_model=False` and create a new copy of the original model somewhere else.') model_fast = replace_to_bettertransformer(model_fast, hf_config) else: model_fast = replace_to_bettertransformer(model, hf_config) model = None if BetterTransformerManager.requires_nested_tensor(model_fast.config.model_type): set_last_layer(model_fast) setattr(model_fast, 'use_bettertransformer', True) if load_accelerate: all_model_tensors = [name for (name, _) in model_fast.state_dict().items()] for module_name in hf_device_map.keys(): all_model_tensors = [name for name in all_model_tensors if not name.startswith(module_name)] if len(all_model_tensors) > 0: bt_device_map = infer_auto_device_map(model_fast, max_memory=max_memory) else: bt_device_map = hf_device_map model_fast = dispatch_model(model_fast, bt_device_map, offload_dir=offload_dir) if keep_original_model: model = dispatch_model(model, hf_device_map, offload_dir=offload_dir) logger.warning('The BetterTransformer implementation does not support padding during training, as the fused kernels do not support attention masks. Beware that passing padded batched data during training may result in unexpected outputs. Please refer to https://huggingface.co/docs/optimum/bettertransformer/overview for more details.') model_fast._old_save_pretrained = model_fast.save_pretrained model_fast._old_push_to_hub = model_fast.push_to_hub model_fast.save_pretrained = raise_save_or_push_incompatible model_fast.push_to_hub = raise_save_or_push_incompatible if training_mode: model_fast = model_fast.train() else: model_fast = model_fast.eval() return model_fast def reverse(bt_model: 'PreTrainedModel') -> 'PreTrainedModel': if getattr(bt_model, 'use_bettertransformer', False) is False: raise ValueError('The method BetterTransformer.reverse() should be used on a model already transformed to the BetterTransformer format, which appears to not be the case.') if parse(torch.__version__) <= parse('1.14'): raise ValueError(f'BetterTransformer reverse transform requires torch>=2.0 but {torch.__version__} is installed. 
Please upgrade PyTorch.') config = bt_model.config if config.model_type not in ['wav2vec2', 'hubert', 'bark']: with torch.device('meta'): reversed_model = bt_model.__class__(config) else: logger.warning('The reverse transform for the architectures wav2vec2, hubert, bark is memory-heavy due to a bug in PyTorch.') reversed_model = bt_model.__class__(config) if bt_model.training is False: reversed_model = reversed_model.eval() reversed_modules_paths = [] for (path, module) in reversed_model.named_modules(): if path.startswith(tuple(reversed_modules_paths)): continue if config.model_type in BetterTransformerManager.EXCLUDE_FROM_TRANSFORM and any((subname in path for subname in BetterTransformerManager.EXCLUDE_FROM_TRANSFORM[config.model_type])): continue target_classes = list(BetterTransformerManager.MODEL_MAPPING[config.model_type].keys()) has_been_replaced = False for target_class in target_classes: if module.__class__.__name__ == target_class: has_been_replaced = True break if has_been_replaced: recurse_setattr(reversed_model, path, recurse_getattr(bt_model, path)._revert(module)) reversed_modules_paths.append(path + '.') for (path, param) in reversed_model.state_dict().items(): if param.device == torch.device('meta') or not path.startswith(tuple(reversed_modules_paths)): recurse_setattr(reversed_model, path, recurse_getattr(bt_model, path)) for (path, param) in reversed_model.named_buffers(): if param.device == torch.device('meta') or not path.startswith(tuple(reversed_modules_paths)): recurse_setattr(reversed_model, path, recurse_getattr(bt_model, path)) return reversed_model # File: optimum-main/optimum/commands/base.py """""" from abc import ABC from argparse import ArgumentParser, RawTextHelpFormatter from dataclasses import dataclass from typing import TYPE_CHECKING, Optional, Tuple, Type if TYPE_CHECKING: from argparse import Namespace, _SubParsersAction @dataclass(frozen=True) class CommandInfo: name: str help: str subcommand_class: Optional[Type['BaseOptimumCLICommand']] = None formatter_class: Type = RawTextHelpFormatter @property def is_subcommand_info(self): return self.subcommand_class is not None def is_subcommand_info_or_raise(self): if not self.is_subcommand_info: raise ValueError(f'The command info must define a subcommand_class attribute, but got: {self}.') class BaseOptimumCLICommand(ABC): COMMAND: CommandInfo SUBCOMMANDS: Tuple[CommandInfo, ...] 
= () def __init__(self, subparsers: Optional['_SubParsersAction'], args: Optional['Namespace']=None, command: Optional[CommandInfo]=None, from_defaults_factory: bool=False, parser: Optional['ArgumentParser']=None): if command is not None: self.COMMAND = command if from_defaults_factory: if parser is None: raise ValueError(f'The instance of the original parser must be passed when creating a defaults factory, command: {self}.') self.parser = parser self.subparsers = subparsers else: if subparsers is None: raise ValueError(f'A subparsers instance is needed when from_defaults_factory=False, command: {self}.') self.parser = subparsers.add_parser(self.COMMAND.name, help=self.COMMAND.help) self.parse_args(self.parser) def defaults_factory(args): return self.__class__(self.subparsers, args, command=self.COMMAND, from_defaults_factory=True, parser=self.parser) self.parser.set_defaults(func=defaults_factory) for subcommand in self.SUBCOMMANDS: if not isinstance(subcommand, CommandInfo): raise ValueError(f'Subcommands must be instances of CommandInfo, but got {type(subcommand)} here.') self.register_subcommand(subcommand) self.args = args @property def subparsers(self): subparsers = getattr(self, '_subparsers', None) if subparsers is None: if self.SUBCOMMANDS: self._subparsers = self.parser.add_subparsers() else: self._subparsers = None return self._subparsers @subparsers.setter def subparsers(self, subparsers: Optional['_SubParsersAction']): self._subparsers = subparsers @property def registered_subcommands(self): if not hasattr(self, '_registered_subcommands'): self._registered_subcommands = [] return self._registered_subcommands @staticmethod def parse_args(parser: 'ArgumentParser'): pass def register_subcommand(self, command_info: CommandInfo): command_info.is_subcommand_info_or_raise() self.SUBCOMMANDS = self.SUBCOMMANDS + (command_info,) self.registered_subcommands.append(command_info.subcommand_class(self.subparsers, command=command_info)) def run(self): self.parser.print_help() class RootOptimumCLICommand(BaseOptimumCLICommand): COMMAND = CommandInfo(name='root', help='optimum-cli root command') def __init__(self, cli_name: str, usage: Optional[str]=None, args: Optional['Namespace']=None): self.parser = ArgumentParser(cli_name, usage=usage) self.subparsers = self.parser.add_subparsers() self.args = None # File: optimum-main/optimum/commands/env.py import platform import huggingface_hub from transformers import __version__ as transformers_version from transformers.utils import is_tf_available, is_torch_available from ..version import __version__ as version from . 
import BaseOptimumCLICommand, CommandInfo class EnvironmentCommand(BaseOptimumCLICommand): COMMAND = CommandInfo(name='env', help='Get information about the environment used.') @staticmethod def format_dict(d): return '\n'.join([f'- {prop}: {val}' for (prop, val) in d.items()]) + '\n' def run(self): pt_version = 'not installed' pt_cuda_available = 'NA' if is_torch_available(): import torch pt_version = torch.__version__ pt_cuda_available = torch.cuda.is_available() tf_version = 'not installed' tf_cuda_available = 'NA' if is_tf_available(): import tensorflow as tf tf_version = tf.__version__ try: tf_cuda_available = tf.test.is_gpu_available() except AttributeError: tf_cuda_available = bool(tf.config.list_physical_devices('GPU')) info = {'`optimum` version': version, '`transformers` version': transformers_version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'PyTorch version (GPU?)': f'{pt_version} (cuda available: {pt_cuda_available})', 'Tensorflow version (GPU?)': f'{tf_version} (cuda available: {tf_cuda_available})'} print('\nCopy-and-paste the text below in your GitHub issue:\n') print(self.format_dict(info)) return info # File: optimum-main/optimum/commands/export/base.py """""" from .. import BaseOptimumCLICommand, CommandInfo from .onnx import ONNXExportCommand from .tflite import TFLiteExportCommand class ExportCommand(BaseOptimumCLICommand): COMMAND = CommandInfo(name='export', help='Export PyTorch and TensorFlow models to several formats.') SUBCOMMANDS = (CommandInfo(name='onnx', help='Export PyTorch and TensorFlow to ONNX.', subcommand_class=ONNXExportCommand), CommandInfo(name='tflite', help='Export TensorFlow to TensorFlow Lite.', subcommand_class=TFLiteExportCommand)) # File: optimum-main/optimum/commands/export/onnx.py """""" import argparse import json from pathlib import Path from typing import TYPE_CHECKING from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from ...exporters import TasksManager from ...utils import DEFAULT_DUMMY_SHAPES from ..base import BaseOptimumCLICommand if TYPE_CHECKING: from argparse import ArgumentParser def parse_args_onnx(parser): required_group = parser.add_argument_group('Required arguments') required_group.add_argument('-m', '--model', type=str, required=True, help='Model ID on huggingface.co or path on disk to load model from.') required_group.add_argument('output', type=Path, help='Path indicating the directory where to store the generated ONNX model.') optional_group = parser.add_argument_group('Optional arguments') optional_group.add_argument('--task', default='auto', help=f'The task to export the model for. If not specified, the task will be auto-inferred based on the model. Available tasks depend on the model, but are among: {str(TasksManager.get_all_tasks())}. For decoder models, use `xxx-with-past` to export the model using past key values in the decoder.') optional_group.add_argument('--opset', type=int, default=None, help='If specified, ONNX opset version to export the model with. Otherwise, the default opset for the given model architecture will be used.') optional_group.add_argument('--device', type=str, default='cpu', help='The device to use to do the export. Defaults to "cpu".') optional_group.add_argument('--fp16', action='store_true', help='Use half precision during the export.
PyTorch-only, requires `--device cuda`.') optional_group.add_argument('--dtype', type=str, default=None, choices=['fp32', 'fp16', 'bf16'], help='The floating point precision to use for the export. Supported options: fp32 (float32), fp16 (float16), bf16 (bfloat16).') optional_group.add_argument('--optimize', type=str, default=None, choices=['O1', 'O2', 'O3', 'O4'], help='Allows to run ONNX Runtime optimizations directly during the export. Some of these optimizations are specific to ONNX Runtime, and the resulting ONNX will not be usable with other runtimes such as OpenVINO or TensorRT. Possible options:\n - O1: Basic general optimizations\n - O2: Basic and extended general optimizations, transformers-specific fusions\n - O3: Same as O2 with GELU approximation\n - O4: Same as O3 with mixed precision (fp16, GPU-only, requires `--device cuda`)') optional_group.add_argument('--monolith', action='store_true', help='Forces to export the model as a single ONNX file. By default, the ONNX exporter may break the model in several ONNX files, for example for encoder-decoder models where the encoder should be run only once while the decoder is looped over.') optional_group.add_argument('--no-post-process', action='store_true', help='Allows to disable any post-processing done by default on the exported ONNX models. For example, the merging of decoder and decoder-with-past models into a single ONNX model file to reduce memory usage.') optional_group.add_argument('--variant', type=str, default='default', help='Select a variant of the model to export.') optional_group.add_argument('--framework', type=str, choices=['pt', 'tf'], default=None, help="The framework to use for the ONNX export. If not provided, will attempt to use the local checkpoint's original framework or what is available in the environment.") optional_group.add_argument('--atol', type=float, default=None, help='If specified, the absolute difference tolerance when validating the model. Otherwise, the default atol for the model will be used.') optional_group.add_argument('--cache_dir', type=str, default=HUGGINGFACE_HUB_CACHE, help='Path indicating where to store cache.') optional_group.add_argument('--trust-remote-code', action='store_true', help='Allows to use custom code for the modeling hosted in the model repository. This option should only be set for repositories you trust and in which you have read the code, as it will execute on your local machine arbitrary code present in the model repository.') optional_group.add_argument('--pad_token_id', type=int, default=None, help='This is needed by some models, for some tasks. If not provided, will attempt to use the tokenizer to guess it.') optional_group.add_argument('--library-name', type=str, choices=['transformers', 'diffusers', 'timm', 'sentence_transformers'], default=None, help="The library of the model. If not provided, will attempt to infer the local checkpoint's library") optional_group.add_argument('--model-kwargs', type=json.loads, help='Any kwargs passed to the model forward, or used to customize the export for a given model.') optional_group.add_argument('--legacy', action='store_true', help='Export decoder only models in three files (without + with past and the resulting merged model). Also disable the use of position_ids for text-generation models that require it for batched generation.
This argument is introduced for backward compatibility and will be removed in a future release of Optimum.') optional_group.add_argument('--no-dynamic-axes', action='store_true', help='Disable dynamic axes during ONNX export') optional_group.add_argument('--no-constant-folding', action='store_true', help='PyTorch-only argument. Disables PyTorch ONNX export constant folding.') input_group = parser.add_argument_group('Input shapes (if necessary, this allows to override the shapes of the input given to the ONNX exporter, that requires an example input).') doc_input = 'to use in the example input given to the ONNX export.' input_group.add_argument('--batch_size', type=int, default=DEFAULT_DUMMY_SHAPES['batch_size'], help=f'Text tasks only. Batch size {doc_input}') input_group.add_argument('--sequence_length', type=int, default=DEFAULT_DUMMY_SHAPES['sequence_length'], help=f'Text tasks only. Sequence length {doc_input}') input_group.add_argument('--num_choices', type=int, default=DEFAULT_DUMMY_SHAPES['num_choices'], help=f'Text tasks only. Num choices {doc_input}') input_group.add_argument('--width', type=int, default=DEFAULT_DUMMY_SHAPES['width'], help=f'Image tasks only. Width {doc_input}') input_group.add_argument('--height', type=int, default=DEFAULT_DUMMY_SHAPES['height'], help=f'Image tasks only. Height {doc_input}') input_group.add_argument('--num_channels', type=int, default=DEFAULT_DUMMY_SHAPES['num_channels'], help=f'Image tasks only. Number of channels {doc_input}') input_group.add_argument('--feature_size', type=int, default=DEFAULT_DUMMY_SHAPES['feature_size'], help=f'Audio tasks only. Feature size {doc_input}') input_group.add_argument('--nb_max_frames', type=int, default=DEFAULT_DUMMY_SHAPES['nb_max_frames'], help=f'Audio tasks only. Maximum number of frames {doc_input}') input_group.add_argument('--audio_sequence_length', type=int, default=DEFAULT_DUMMY_SHAPES['audio_sequence_length'], help=f'Audio tasks only. Audio sequence length {doc_input}') input_group.add_argument('--point_batch_size', type=int, default=DEFAULT_DUMMY_SHAPES['point_batch_size'], help='For Segment Anything. It corresponds to how many segmentation masks we want the model to predict per input point.') input_group.add_argument('--nb_points_per_image', type=int, default=DEFAULT_DUMMY_SHAPES['nb_points_per_image'], help='For Segment Anything. 
It corresponds to the number of points per segmentation masks.') parser.add_argument('--for-ort', action='store_true', help=argparse.SUPPRESS) class ONNXExportCommand(BaseOptimumCLICommand): @staticmethod def parse_args(parser: 'ArgumentParser'): return parse_args_onnx(parser) def run(self): from ...exporters.onnx import main_export input_shapes = {} for input_name in DEFAULT_DUMMY_SHAPES.keys(): if hasattr(self.args, input_name): input_shapes[input_name] = getattr(self.args, input_name) main_export(model_name_or_path=self.args.model, output=self.args.output, task=self.args.task, opset=self.args.opset, device=self.args.device, fp16=self.args.fp16, dtype=self.args.dtype, optimize=self.args.optimize, monolith=self.args.monolith, no_post_process=self.args.no_post_process, framework=self.args.framework, atol=self.args.atol, cache_dir=self.args.cache_dir, trust_remote_code=self.args.trust_remote_code, pad_token_id=self.args.pad_token_id, for_ort=self.args.for_ort, use_subprocess=True, _variant=self.args.variant, library_name=self.args.library_name, legacy=self.args.legacy, no_dynamic_axes=self.args.no_dynamic_axes, model_kwargs=self.args.model_kwargs, do_constant_folding=not self.args.no_constant_folding, **input_shapes) # File: optimum-main/optimum/commands/export/tflite.py """""" import subprocess import sys from pathlib import Path from typing import TYPE_CHECKING, Optional from ...exporters import TasksManager from ...exporters.tflite import QuantizationApproach from ..base import BaseOptimumCLICommand if TYPE_CHECKING: from argparse import ArgumentParser, Namespace, _SubParsersAction from ..base import CommandInfo def parse_args_tflite(parser: 'ArgumentParser'): required_group = parser.add_argument_group('Required arguments') required_group.add_argument('-m', '--model', type=str, required=True, help='Model ID on huggingface.co or path on disk to load model from.') required_group.add_argument('output', type=Path, help='Path indicating the directory where to store generated TFLite model.') optional_group = parser.add_argument_group('Optional arguments') optional_group.add_argument('--task', default='auto', help=f'The task to export the model for. If not specified, the task will be auto-inferred based on the model. Available tasks depend on the model, but are among: {str(TasksManager.get_all_tasks())}. For decoder models, use `xxx-with-past` to export the model using past key values in the decoder.') optional_group.add_argument('--atol', type=float, default=None, help='If specified, the absolute difference tolerance when validating the model. Otherwise, the default atol for the model will be used.') optional_group.add_argument('--pad_token_id', type=int, default=None, help='This is needed by some models, for some tasks. If not provided, will attempt to use the tokenizer to guess it.') optional_group.add_argument('--cache_dir', type=str, default=None, help='Path indicating where to store cache.') optional_group.add_argument('--trust-remote-code', action='store_true', help='Allow to use custom code for the modeling hosted in the model repository. This option should only be set for repositories you trust and in which you have read the code, as it will execute on your local machine arbitrary code present in the model repository.') input_group = parser.add_argument_group('Input shapes') doc_input = 'that the TFLite exported model will be able to take as input.' 
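# Note: unlike the ONNX exporter above, whose shape arguments only size the dummy example inputs used
# during export, the shapes below define the inputs the exported TFLite model will accept.
# A minimal usage sketch of the two export subcommands defined in this file and the previous one
# (the model ID and output directories are placeholders, not part of the original source):
#   optimum-cli export onnx --model distilbert-base-uncased distilbert_onnx/
#   optimum-cli export tflite -m distilbert-base-uncased --sequence_length 128 distilbert_tflite/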
input_group.add_argument('--batch_size', type=int, default=1, help=f'Batch size {doc_input}') input_group.add_argument('--sequence_length', type=int, default=None, help=f'Sequence length {doc_input}') input_group.add_argument('--num_choices', type=int, default=None, help=f'Only for the multiple-choice task. Num choices {doc_input}') input_group.add_argument('--width', type=int, default=None, help=f'Vision tasks only. Image width {doc_input}') input_group.add_argument('--height', type=int, default=None, help=f'Vision tasks only. Image height {doc_input}') input_group.add_argument('--num_channels', type=int, default=None, help=f'Vision tasks only. Number of channels used to represent the image {doc_input} (GREY = 1, RGB = 3, ARGB = 4)') input_group.add_argument('--feature_size', type=int, default=None, help=f'Audio tasks only. Feature dimension of the extracted features by the feature extractor {doc_input}') input_group.add_argument('--nb_max_frames', type=int, default=None, help=f'Audio tasks only. Maximum number of frames {doc_input}') input_group.add_argument('--audio_sequence_length', type=int, default=None, help=f'Audio tasks only. Audio sequence length {doc_input}') quantization_group = parser.add_argument_group('Quantization') quantization_group.add_argument('--quantize', choices=[e.value for e in QuantizationApproach], type=str, default=None, help='The method of quantization to perform, possible choices: "int8-dynamic", "int8", "int8x16", "fp16". No quantization will happen if left unspecified.') quantization_group.add_argument('--fallback_to_float', action='store_true', help='Whether to fall back to the float implementation for operators without an integer implementation. This needs to be disabled for integer-only hardware.') quantization_group.add_argument('--inputs_type', choices=['int8', 'uint8'], default=None, help='The inputs will be expected to be of the specified type. This is useful for integer-only hardware.') quantization_group.add_argument('--outputs_type', choices=['int8', 'uint8'], default=None, help='The outputs will be of the specified type. This is useful for integer-only hardware.') calibration_dataset_group = parser.add_argument_group('Quantization Calibration dataset') calibration_dataset_group.add_argument('--calibration_dataset', type=str, default=None, help='The dataset to use to calibrate integer ranges when quantizing the model. This is needed to perform static quantization.') calibration_dataset_group.add_argument('--calibration_dataset_config_name', type=str, default=None, help='The calibration dataset configuration name, this is needed for some datasets.') calibration_dataset_group.add_argument('--num_calibration_samples', type=int, default=200, help='The number of samples in the calibration dataset to use for calibration, usually something around 100-200 is enough.') calibration_dataset_group.add_argument('--calibration_split', type=str, default=None, help='The split of the calibration dataset to use.') calibration_dataset_group.add_argument('--primary_key', type=str, default=None, help='The name of the column in the dataset containing the main data to preprocess. Only for text-classification and token-classification. ') calibration_dataset_group.add_argument('--secondary_key', type=str, default=None, help='The name of the second column in the dataset containing the main data to preprocess, not always needed. Only for text-classification and token-classification. 
') calibration_dataset_group.add_argument('--question_key', type=str, default=None, help='The name of the column containing the question in the dataset. Only for question-answering.') calibration_dataset_group.add_argument('--context_key', type=str, default=None, help='The name of the column containing the context in the dataset. Only for question-answering.') calibration_dataset_group.add_argument('--image_key', type=str, default=None, help='The name of the column containing the image in the dataset. Only for image-classification.') class TFLiteExportCommand(BaseOptimumCLICommand): def __init__(self, subparsers: Optional['_SubParsersAction'], args: Optional['Namespace']=None, command: Optional['CommandInfo']=None, from_defaults_factory: bool=False, parser: Optional['ArgumentParser']=None): super().__init__(subparsers, args, command=command, from_defaults_factory=from_defaults_factory, parser=parser) self.args_string = ' '.join(sys.argv[3:]) @staticmethod def parse_args(parser: 'ArgumentParser'): return parse_args_tflite(parser) def run(self): full_command = f'python3 -m optimum.exporters.tflite {self.args_string}' subprocess.run(full_command, shell=True, check=True) # File: optimum-main/optimum/commands/optimum_cli.py import importlib from pathlib import Path from typing import Dict, List, Optional, Tuple, Type, Union from ..subpackages import load_subpackages from ..utils import logging from .base import BaseOptimumCLICommand, CommandInfo, RootOptimumCLICommand from .env import EnvironmentCommand from .export import ExportCommand logger = logging.get_logger() OPTIMUM_CLI_ROOT_SUBCOMMANDS = [ExportCommand, EnvironmentCommand] _OPTIMUM_CLI_SUBCOMMANDS = [] def optimum_cli_subcommand(parent_command: Optional[Type[BaseOptimumCLICommand]]=None): if parent_command is not None and (not issubclass(parent_command, BaseOptimumCLICommand)): raise ValueError(f'The parent command {parent_command} must be a subclass of BaseOptimumCLICommand') def wrapper(subcommand): if not issubclass(subcommand, BaseOptimumCLICommand): raise ValueError(f'The subcommand {subcommand} must be a subclass of BaseOptimumCLICommand') _OPTIMUM_CLI_SUBCOMMANDS.append((subcommand, parent_command)) return wrapper def resolve_command_to_command_instance(root: RootOptimumCLICommand, commands: List[Type[BaseOptimumCLICommand]]) -> Dict[Type[BaseOptimumCLICommand], BaseOptimumCLICommand]: to_visit = [root] remaining_commands = set(commands) command2command_instance = {} while to_visit: current_command_instance = to_visit.pop(0) if current_command_instance.__class__ in remaining_commands: remaining_commands.remove(current_command_instance.__class__) command2command_instance[current_command_instance.__class__] = current_command_instance if not remaining_commands: break to_visit += current_command_instance.registered_subcommands if remaining_commands: class_names = (command.__name__ for command in remaining_commands) raise RuntimeError(f"Could not find an instance of the following commands in the CLI {root}: {', '.join(class_names)}.") return command2command_instance def dynamic_load_commands_in_register() -> List[Tuple[Union[Type[BaseOptimumCLICommand], CommandInfo], Optional[Type[BaseOptimumCLICommand]]]]: commands_to_register = [] register_dir_path = Path(__file__).parent / 'register' for filename in register_dir_path.iterdir(): if filename.is_dir() or filename.suffix != '.py': if filename.name not in ['__pycache__', 'README.md']: logger.warning(f'Skipping {filename} because only python files are allowed when registering commands 
dynamically.') continue module_name = f'.register.{filename.stem}' module = importlib.import_module(module_name, package='optimum.commands') commands_to_register_in_file = getattr(module, 'REGISTER_COMMANDS', []) for (command_idx, command) in enumerate(commands_to_register_in_file): if isinstance(command, tuple): (command_or_command_info, parent_command_cls) = command else: command_or_command_info = command parent_command_cls = None if not isinstance(command_or_command_info, CommandInfo) and (not issubclass(command_or_command_info, BaseOptimumCLICommand)): raise ValueError(f'The command at index {command_idx} in {filename} is not of the right type: {type(command_or_command_info)}.') commands_to_register.append((command_or_command_info, parent_command_cls)) return commands_to_register def register_optimum_cli_subcommand(command_or_command_info: Union[Type[BaseOptimumCLICommand], CommandInfo], parent_command: BaseOptimumCLICommand): if not isinstance(command_or_command_info, CommandInfo): command_info = CommandInfo(command_or_command_info.COMMAND.name, help=command_or_command_info.COMMAND.help, subcommand_class=command_or_command_info) else: command_info = command_or_command_info command_info.is_subcommand_info_or_raise() parent_command.register_subcommand(command_info) def main(): root = RootOptimumCLICommand('Optimum CLI tool', usage='optimum-cli') parser = root.parser for subcommand_cls in OPTIMUM_CLI_ROOT_SUBCOMMANDS: register_optimum_cli_subcommand(subcommand_cls, parent_command=root) load_subpackages() commands_to_register = _OPTIMUM_CLI_SUBCOMMANDS + dynamic_load_commands_in_register() command2command_instance = resolve_command_to_command_instance(root, [parent_command_cls for (_, parent_command_cls) in commands_to_register if parent_command_cls is not None]) for (command_or_command_info, parent_command) in commands_to_register: if parent_command is None: parent_command_instance = root else: parent_command_instance = command2command_instance[parent_command] register_optimum_cli_subcommand(command_or_command_info, parent_command=parent_command_instance) args = parser.parse_args() if not hasattr(args, 'func'): parser.print_help() exit(1) service = args.func(args) service.run() if __name__ == '__main__': main() # File: optimum-main/optimum/configuration_utils.py """""" import copy import json import os import re import warnings from typing import Any, Dict, List, Tuple, Union from packaging import version from transformers import PretrainedConfig from transformers import __version__ as transformers_version_str from .utils import logging from .version import __version__ _transformers_version = version.parse(transformers_version_str) _transformers_version_threshold = (4, 22) _transformers_version_is_below_threshold = (_transformers_version.major, _transformers_version.minor) < _transformers_version_threshold if _transformers_version_is_below_threshold: from transformers.utils import cached_path, hf_bucket_url else: from transformers.dynamic_module_utils import custom_object_save from transformers.utils import cached_file, download_url, extract_commit_hash, is_remote_url logger = logging.get_logger(__name__) class BaseConfig(PretrainedConfig): CONFIG_NAME = 'config.json' FULL_CONFIGURATION_FILE = 'config.json' @classmethod def _re_configuration_file(cls): return re.compile(f"{cls.FULL_CONFIGURATION_FILE.split('.')[0]}(.*)\\.json") def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs): if os.path.isfile(save_directory): raise 
AssertionError(f'Provided path ({save_directory}) should be a directory, not a file') if not _transformers_version_is_below_threshold: os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop('commit_message', None) if _transformers_version_is_below_threshold: repo = self._create_or_get_repo(save_directory, **kwargs) else: repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) use_auth_token = kwargs.get('use_auth_token', None) token = kwargs.get('token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') kwargs['token'] = use_auth_token token = use_auth_token files_timestamps = self._get_files_timestamps(save_directory) if _transformers_version_is_below_threshold: os.makedirs(save_directory, exist_ok=True) if self._auto_class is not None: custom_object_save(self, save_directory, config=self) output_config_file = os.path.join(save_directory, self.CONFIG_NAME) self.to_json_file(output_config_file, use_diff=True) logger.info(f'Configuration saved in {output_config_file}') if push_to_hub: if _transformers_version_is_below_threshold: url = self._push_to_hub(repo, commit_message=commit_message) logger.info(f'Configuration pushed to the hub in this commit: {url}') else: self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token) @classmethod def get_configuration_file(cls, configuration_files: List[str]) -> str: configuration_files_map = {} _re_configuration_file = cls._re_configuration_file() for file_name in configuration_files: search = _re_configuration_file.search(file_name) if search is not None: v = search.groups()[0] configuration_files_map[v] = file_name available_versions = sorted(configuration_files_map.keys()) configuration_file = cls.CONFIG_NAME optimum_version = version.parse(__version__) for v in available_versions: if version.parse(v) <= optimum_version: configuration_file = configuration_files_map[v] else: break return configuration_file @classmethod def get_config_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]: original_kwargs = copy.deepcopy(kwargs) (config_dict, kwargs) = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) if '_commit_hash' in config_dict: original_kwargs['_commit_hash'] = config_dict['_commit_hash'] if 'configuration_files' in config_dict: configuration_file = cls.get_configuration_file(config_dict['configuration_files']) (config_dict, kwargs) = cls._get_config_dict(pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs) return (config_dict, kwargs) @classmethod def _get_config_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]: cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) resume_download = kwargs.pop('resume_download', False) proxies = kwargs.pop('proxies', None) use_auth_token = kwargs.pop('use_auth_token', None) token = kwargs.pop('token', None) local_files_only = kwargs.pop('local_files_only', False) revision = kwargs.pop('revision', None) trust_remote_code = kwargs.pop('trust_remote_code', None) subfolder = kwargs.pop('subfolder', '') 
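# --- Editor's sketch (begin); not part of the surrounding optimum source. ---
# BaseConfig is intended to be subclassed by configuration objects elsewhere in Optimum.
# A minimal, hypothetical subclass and save/load round trip might look like this; the
# class name, file name and `opset` field are illustrative only.
from optimum.configuration_utils import BaseConfig

class MyExportConfig(BaseConfig):
    CONFIG_NAME = "my_export_config.json"
    FULL_CONFIGURATION_FILE = "my_export_config.json"

    def __init__(self, opset: int = 13, **kwargs):
        super().__init__(**kwargs)
        self.opset = opset

cfg = MyExportConfig(opset=14)
cfg.save_pretrained("saved_config_dir")  # writes saved_config_dir/my_export_config.json
config_dict, unused_kwargs = MyExportConfig.get_config_dict("saved_config_dir")
# --- Editor's sketch (end); _get_config_dict continues below. ---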
from_pipeline = kwargs.pop('_from_pipeline', None) from_auto_class = kwargs.pop('_from_auto', False) commit_hash = kwargs.pop('_commit_hash', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token if trust_remote_code is True: logger.warning('The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.') user_agent = {'file_type': 'config', 'from_auto_class': from_auto_class} if from_pipeline is not None: user_agent['using_pipeline'] = from_pipeline pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): resolved_config_file = pretrained_model_name_or_path is_local = True elif _transformers_version_is_below_threshold and os.path.isdir(pretrained_model_name_or_path): configuration_file = kwargs.pop('_configuration_file', cls.CONFIG_NAME) resolved_config_file = os.path.join(pretrained_model_name_or_path, configuration_file) if not os.path.isfile(resolved_config_file): raise EnvironmentError(f'Could not locate {configuration_file} inside {pretrained_model_name_or_path}.') elif not _transformers_version_is_below_threshold and is_remote_url(pretrained_model_name_or_path): configuration_file = pretrained_model_name_or_path resolved_config_file = download_url(pretrained_model_name_or_path) else: configuration_file = kwargs.pop('_configuration_file', cls.CONFIG_NAME) try: if _transformers_version_is_below_threshold: config_file = hf_bucket_url(pretrained_model_name_or_path, filename=configuration_file, revision=revision, subfolder=subfolder if len(subfolder) > 0 else None, mirror=None) resolved_config_file = cached_path(config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent) else: resolved_config_file = cached_file(pretrained_model_name_or_path, configuration_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) except EnvironmentError: raise except Exception: raise EnvironmentError(f"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a {configuration_file} file") try: config_dict = cls._dict_from_json_file(resolved_config_file) if _transformers_version_is_below_threshold: config_dict['_commit_hash'] = commit_hash except (json.JSONDecodeError, UnicodeDecodeError): raise EnvironmentError(f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file.") if is_local: logger.info(f'loading configuration file {resolved_config_file}') else: logger.info(f'loading configuration file {configuration_file} from cache at {resolved_config_file}') return (config_dict, kwargs) @classmethod def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> 'PretrainedConfig': return_unused_kwargs = kwargs.pop('return_unused_kwargs', False) kwargs.pop('_from_auto', None) kwargs.pop('_from_pipeline', None) if '_commit_hash' in kwargs and '_commit_hash' in config_dict: kwargs['_commit_hash'] = config_dict['_commit_hash'] config = cls(**config_dict) if hasattr(config, 'pruned_heads'): config.pruned_heads = {int(key): value for (key, value) in config.pruned_heads.items()} if 'num_labels' in kwargs and 'id2label' in kwargs: num_labels = kwargs['num_labels'] id2label = kwargs['id2label'] if kwargs['id2label'] is not None else [] if len(id2label) != num_labels: raise ValueError(f"You passed along `num_labels={num_labels}` with an incompatible id to label map: {kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove one of them.") to_remove = [] for (key, value) in kwargs.items(): if hasattr(config, key): setattr(config, key, value) if key != 'torch_dtype': to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info(config) if return_unused_kwargs: return (config, kwargs) else: return config def to_dict(self) -> Dict[str, Any]: output = copy.deepcopy(self.__dict__) if hasattr(self.__class__, 'model_type'): output['model_type'] = self.__class__.model_type if '_auto_class' in output: del output['_auto_class'] if '_commit_hash' in output: del output['_commit_hash'] output['transformers_version'] = transformers_version_str output['optimum_version'] = __version__ self.dict_torch_dtype_to_str(output) return output # File: optimum-main/optimum/exporters/error_utils.py """""" class ShapeError(ValueError): pass class AtolError(ValueError): pass class OutputMatchError(ValueError): pass class NumberOfInputsMatchError(ValueError): pass class NumberOfOutputsMatchError(ValueError): pass class MinimumVersionError(ValueError): pass # File: optimum-main/optimum/exporters/onnx/__init__.py from typing import TYPE_CHECKING from transformers.utils import _LazyModule _import_structure = {'base': ['OnnxConfig', 'OnnxConfigWithLoss', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast'], 'config': ['TextDecoderOnnxConfig', 'TextEncoderOnnxConfig', 'TextSeq2SeqOnnxConfig'], 'convert': ['export', 'export_models', 'validate_model_outputs', 'validate_models_outputs', 'onnx_export_from_model'], 'utils': ['get_decoder_models_for_export', 'get_encoder_decoder_models_for_export', 'get_diffusion_models_for_export', 'MODEL_TYPES_REQUIRING_POSITION_IDS'], '__main__': ['main_export']} if TYPE_CHECKING: from .base import OnnxConfig, OnnxConfigWithLoss, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from .config import TextDecoderOnnxConfig, TextEncoderOnnxConfig, TextSeq2SeqOnnxConfig from .convert import export, export_models, validate_model_outputs, validate_models_outputs, onnx_export_from_model from .utils 
import get_decoder_models_for_export, get_encoder_decoder_models_for_export, get_diffusion_models_for_export, MODEL_TYPES_REQUIRING_POSITION_IDS from .__main__ import main_export else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: optimum-main/optimum/exporters/onnx/__main__.py """""" import argparse import warnings from pathlib import Path from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from packaging import version from requests.exceptions import ConnectionError as RequestsConnectionError from transformers import AutoConfig, AutoTokenizer from transformers.utils import is_torch_available from ...commands.export.onnx import parse_args_onnx from ...configuration_utils import _transformers_version from ...utils import DEFAULT_DUMMY_SHAPES, logging from ...utils.save_utils import maybe_load_preprocessors from ..tasks import TasksManager from .constants import SDPA_ARCHS_ONNX_EXPORT_NOT_SUPPORTED from .convert import onnx_export_from_model if is_torch_available(): import torch from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union if TYPE_CHECKING: from .base import OnnxConfig logger = logging.get_logger() logger.setLevel(logging.INFO) def main_export(model_name_or_path: str, output: Union[str, Path], task: str='auto', opset: Optional[int]=None, device: str='cpu', dtype: Optional[str]=None, fp16: Optional[bool]=False, optimize: Optional[str]=None, monolith: bool=False, no_post_process: bool=False, framework: Optional[str]=None, atol: Optional[float]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, trust_remote_code: bool=False, pad_token_id: Optional[int]=None, subfolder: str='', revision: str='main', force_download: bool=False, local_files_only: bool=False, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, for_ort: bool=False, do_validation: bool=True, model_kwargs: Optional[Dict[str, Any]]=None, custom_onnx_configs: Optional[Dict[str, 'OnnxConfig']]=None, fn_get_submodels: Optional[Callable]=None, use_subprocess: bool=False, _variant: str='default', library_name: Optional[str]=None, legacy: bool=False, no_dynamic_axes: bool=False, do_constant_folding: bool=True, **kwargs_shapes): if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token if fp16: if dtype is not None: raise ValueError(f'Both the arguments `fp16` ({fp16}) and `dtype` ({dtype}) were specified in the ONNX export, which is not supported. Please specify only `dtype`. Possible options: "fp32" (default), "fp16", "bf16".') logger.warning('The argument `fp16` is deprecated in the ONNX export. Please use the argument `dtype="fp16"` instead, or `--dtype fp16` from the command-line.') dtype = 'fp16' elif dtype is None: dtype = 'fp32' if optimize == 'O4' and device != 'cuda': raise ValueError('Requested O4 optimization, but this optimization requires to do the export on GPU. Please pass the argument `--device cuda`.') if framework == 'tf' and fp16 or not is_torch_available(): raise ValueError('The --fp16 option is supported only for PyTorch.') if dtype == 'fp16' and device == 'cpu': raise ValueError('FP16 export is supported only when exporting on GPU. 
Please pass the option `--device cuda`.') if for_ort: logger.warning('The option --for-ort was passed, but its behavior is now the default in the ONNX exporter and passing it is not required anymore.') if task in ['stable-diffusion', 'stable-diffusion-xl']: logger.warning(f'The task `{task}` is deprecated and will be removed in a future release of Optimum. Please use one of the following tasks instead: `text-to-image`, `image-to-image`, `inpainting`.') original_task = task task = TasksManager.map_from_synonym(task) if framework is None: framework = TasksManager.determine_framework(model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token) if library_name is None: library_name = TasksManager.infer_library_from_model(model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token) torch_dtype = None if framework == 'pt': if dtype == 'fp16': torch_dtype = torch.float16 elif dtype == 'bf16': torch_dtype = torch.bfloat16 if task.endswith('-with-past') and monolith: task_non_past = task.replace('-with-past', '') raise ValueError(f'The task {task} is not compatible with the --monolith argument. Please either use `--task {task_non_past} --monolith`, or `--task {task}` without the monolith argument.') if task == 'auto': try: task = TasksManager.infer_task_from_model(model_name_or_path) except KeyError as e: raise KeyError(f"The task could not be automatically inferred. Please provide the argument --task with the relevant task from {', '.join(TasksManager.get_all_tasks())}. Detailed error: {e}") except RequestsConnectionError as e: raise RequestsConnectionError(f"The task could not be automatically inferred as this is available only for models hosted on the Hugging Face Hub. Please provide the argument --task with the relevant task from {', '.join(TasksManager.get_all_tasks())}. Detailed error: {e}") custom_architecture = False loading_kwargs = {} if library_name == 'transformers': config = AutoConfig.from_pretrained(model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code) model_type = config.model_type.replace('_', '-') if model_type not in TasksManager._SUPPORTED_MODEL_TYPE: custom_architecture = True elif task not in TasksManager.get_supported_tasks_for_model_type(model_type, 'onnx', library_name=library_name): if original_task == 'auto': autodetected_message = ' (auto-detected)' else: autodetected_message = '' model_tasks = TasksManager.get_supported_tasks_for_model_type(model_type, exporter='onnx', library_name=library_name) raise ValueError(f"Asked to export a {model_type} model for the task {task}{autodetected_message}, but the Optimum ONNX exporter only supports the tasks {', '.join(model_tasks.keys())} for {model_type}. Please use a supported task. 
Please open an issue at https://github.com/huggingface/optimum/issues if you would like the task {task} to be supported in the ONNX export for {model_type}.") if model_type in SDPA_ARCHS_ONNX_EXPORT_NOT_SUPPORTED and _transformers_version >= version.parse('4.35.99'): loading_kwargs['attn_implementation'] = 'eager' model = TasksManager.get_model_from_task(task, model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, framework=framework, torch_dtype=torch_dtype, device=device, library_name=library_name, **loading_kwargs) needs_pad_token_id = task == 'text-classification' and getattr(model.config, 'pad_token_id', None) is None if needs_pad_token_id: if pad_token_id is not None: model.config.pad_token_id = pad_token_id else: tok = AutoTokenizer.from_pretrained(model_name_or_path) pad_token_id = getattr(tok, 'pad_token_id', None) if pad_token_id is None: raise ValueError('Could not infer the pad token id, which is needed in this case, please provide it with the --pad_token_id argument') model.config.pad_token_id = pad_token_id if hasattr(model.config, 'export_model_type'): model_type = model.config.export_model_type.replace('_', '-') else: model_type = model.config.model_type.replace('_', '-') if not custom_architecture and library_name != 'diffusers' and (task + '-with-past' in TasksManager.get_supported_tasks_for_model_type(model_type, 'onnx', library_name=library_name)): if original_task == 'auto' and (not monolith): task = task + '-with-past' else: logger.info(f'The task `{task}` was manually specified, and past key values will not be reused in the decoding. if needed, please pass `--task {task}-with-past` to export using the past key values.') model.config.use_cache = False if task.endswith('with-past'): model.config.use_cache = True if original_task == 'auto': synonyms_for_task = sorted(TasksManager.synonyms_for_task(task)) if synonyms_for_task: synonyms_for_task = ', '.join(synonyms_for_task) possible_synonyms = f' (possible synonyms are: {synonyms_for_task})' else: possible_synonyms = '' logger.info(f'Automatic task detection to {task}{possible_synonyms}.') preprocessors = maybe_load_preprocessors(model_name_or_path, subfolder=subfolder, trust_remote_code=trust_remote_code) onnx_export_from_model(model=model, output=output, opset=opset, optimize=optimize, monolith=monolith, no_post_process=no_post_process, atol=atol, do_validation=do_validation, model_kwargs=model_kwargs, custom_onnx_configs=custom_onnx_configs, fn_get_submodels=fn_get_submodels, _variant=_variant, legacy=legacy, preprocessors=preprocessors, device=device, no_dynamic_axes=no_dynamic_axes, task=task, use_subprocess=use_subprocess, do_constant_folding=do_constant_folding, **kwargs_shapes) def main(): parser = argparse.ArgumentParser('Hugging Face Optimum ONNX exporter') parse_args_onnx(parser) args = parser.parse_args() input_shapes = {} for input_name in DEFAULT_DUMMY_SHAPES.keys(): input_shapes[input_name] = getattr(args, input_name) main_export(model_name_or_path=args.model, output=args.output, task=args.task, opset=args.opset, device=args.device, fp16=args.fp16, optimize=args.optimize, monolith=args.monolith, no_post_process=args.no_post_process, framework=args.framework, atol=args.atol, cache_dir=args.cache_dir, trust_remote_code=args.trust_remote_code, pad_token_id=args.pad_token_id, for_ort=args.for_ort, library_name=args.library_name, legacy=args.legacy, 
do_constant_folding=not args.no_constant_folding, **input_shapes) if __name__ == '__main__': main() # File: optimum-main/optimum/exporters/onnx/base.py """""" import copy import enum import gc import inspect import itertools import os import re from abc import ABC, abstractmethod from collections import OrderedDict from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union import numpy as np import onnx from transformers.utils import is_accelerate_available, is_torch_available from ...onnx import remove_duplicate_weights_from_tied_info if is_torch_available(): import torch.nn as nn from ...onnx import merge_decoders from ...utils import DEFAULT_DUMMY_SHAPES, DummyInputGenerator, DummyLabelsGenerator, DummySeq2SeqPastKeyValuesGenerator, is_diffusers_available, logging from ...utils import TORCH_MINIMUM_VERSION as GLOBAL_MIN_TORCH_VERSION from ...utils import TRANSFORMERS_MINIMUM_VERSION as GLOBAL_MIN_TRANSFORMERS_VERSION from ...utils.doc import add_dynamic_docstring from ...utils.import_utils import check_if_transformers_greater, is_onnx_available, is_onnxruntime_available from ..base import ExportConfig from .constants import ONNX_DECODER_MERGED_NAME, ONNX_DECODER_NAME, ONNX_DECODER_WITH_PAST_NAME from .model_patcher import ModelPatcher, Seq2SeqModelPatcher if is_accelerate_available(): from accelerate.utils import find_tied_parameters if TYPE_CHECKING: from transformers import PretrainedConfig, PreTrainedModel, TFPreTrainedModel if is_diffusers_available(): from diffusers import ModelMixin from .model_patcher import PatchingSpec logger = logging.get_logger(__name__) GENERATE_DUMMY_DOCSTRING = '\n Generates the dummy inputs necessary for tracing the model. If not explicitely specified, default input shapes are used.\n\n Args:\n framework (`str`, defaults to `"pt"`):\n The framework for which to create the dummy inputs.\n batch_size (`int`, defaults to {batch_size}):\n The batch size to use in the dummy inputs.\n sequence_length (`int`, defaults to {sequence_length}):\n The sequence length to use in the dummy inputs.\n num_choices (`int`, defaults to {num_choices}):\n The number of candidate answers provided for multiple choice task.\n image_width (`int`, defaults to {width}):\n The width to use in the dummy inputs for vision tasks.\n image_height (`int`, defaults to {height}):\n The height to use in the dummy inputs for vision tasks.\n num_channels (`int`, defaults to {num_channels}):\n The number of channels to use in the dummpy inputs for vision tasks.\n feature_size (`int`, defaults to {feature_size}):\n The number of features to use in the dummpy inputs for audio tasks in case it is not raw audio.\n This is for example the number of STFT bins or MEL bins.\n nb_max_frames (`int`, defaults to {nb_max_frames}):\n The number of frames to use in the dummpy inputs for audio tasks in case the input is not raw audio.\n audio_sequence_length (`int`, defaults to {audio_sequence_length}):\n The number of frames to use in the dummpy inputs for audio tasks in case the input is raw audio.\n\n Returns:\n `Dict`: A dictionary mapping the input names to dummy tensors in the proper framework format.\n' class OnnxConfig(ExportConfig, ABC): NORMALIZED_CONFIG_CLASS = None DUMMY_INPUT_GENERATOR_CLASSES = () DEFAULT_ONNX_OPSET = 11 ATOL_FOR_VALIDATION: Union[float, Dict[str, float]] = 1e-05 MIN_TORCH_VERSION = GLOBAL_MIN_TORCH_VERSION MIN_TRANSFORMERS_VERSION = GLOBAL_MIN_TRANSFORMERS_VERSION PATCHING_SPECS: Optional[List['PatchingSpec']] = None VARIANTS = 
{'default': 'The default ONNX variant.'} DEFAULT_VARIANT = 'default' _TASK_TO_COMMON_OUTPUTS = {'audio-classification': OrderedDict({'logits': {0: 'batch_size'}}), 'audio-frame-classification': OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}), 'automatic-speech-recognition': OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}), 'audio-xvector': OrderedDict({'logits': {0: 'batch_size'}, 'embeddings': {0: 'batch_size'}}), 'depth-estimation': OrderedDict({'predicted_depth': {0: 'batch_size', 1: 'height', 2: 'width'}}), 'document-question-answering': OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}), 'feature-extraction': OrderedDict({'last_hidden_state': {0: 'batch_size', 1: 'sequence_length'}}), 'fill-mask': OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}), 'image-classification': OrderedDict({'logits': {0: 'batch_size'}}), 'image-segmentation': OrderedDict({'logits': {0: 'batch_size', 1: 'num_labels', 2: 'height', 3: 'width'}}), 'image-to-text': OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}), 'image-to-image': OrderedDict({'reconstruction': {0: 'batch_size', 1: 'num_channels', 2: 'height', 3: 'width'}}), 'mask-generation': OrderedDict({'logits': {0: 'batch_size'}}), 'masked-im': OrderedDict({'reconstruction' if check_if_transformers_greater('4.29.0') else 'logits': {0: 'batch_size'}}), 'multiple-choice': OrderedDict({'logits': {0: 'batch_size', 1: 'num_choices'}}), 'object-detection': OrderedDict({'logits': {0: 'batch_size', 1: 'num_queries'}, 'pred_boxes': {0: 'batch_size', 1: 'num_queries'}}), 'question-answering': OrderedDict({'start_logits': {0: 'batch_size', 1: 'sequence_length'}, 'end_logits': {0: 'batch_size', 1: 'sequence_length'}}), 'semantic-segmentation': OrderedDict({'logits': {0: 'batch_size', 1: 'num_labels', 2: 'height', 3: 'width'}}), 'text2text-generation': OrderedDict({'logits': {0: 'batch_size', 1: 'decoder_sequence_length'}}), 'text-classification': OrderedDict({'logits': {0: 'batch_size'}}), 'text-generation': OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}), 'token-classification': OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}), 'visual-question-answering': OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}), 'zero-shot-image-classification': OrderedDict({'logits_per_image': {0: 'image_batch_size', 1: 'text_batch_size'}, 'logits_per_text': {0: 'text_batch_size', 1: 'image_batch_size'}, 'text_embeds': {0: 'text_batch_size'}, 'image_embeds': {0: 'image_batch_size'}}), 'zero-shot-object-detection': OrderedDict({'logits': {0: 'batch_size', 1: 'num_queries'}, 'pred_boxes': {0: 'batch_size', 1: 'num_queries'}, 'text_embeds': {0: 'text_batch_size'}, 'image_embeds': {0: 'image_batch_size'}})} def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', preprocessors: Optional[List[Any]]=None, int_dtype: str='int64', float_dtype: str='fp32', legacy: bool=False): self.task = task self.int_dtype = int_dtype self.float_dtype = float_dtype self._config = config self._preprocessors = preprocessors self._normalized_config = self.NORMALIZED_CONFIG_CLASS(self._config) self.variant = 'default' self.legacy = legacy def _create_dummy_input_generator_classes(self, **kwargs) -> List[DummyInputGenerator]: first_inputs_gen = self.DUMMY_INPUT_GENERATOR_CLASSES[0](self.task, self._normalized_config, **kwargs) dummy_inputs_generators = [cls_(self.task, self._normalized_config, **kwargs) for cls_ in self.DUMMY_INPUT_GENERATOR_CLASSES[1:]] 
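# --- Editor's sketch (begin); not part of the surrounding optimum source. ---
# A concrete OnnxConfig subclass usually only needs to declare a normalized config class,
# the dummy input generators used by generate_dummy_inputs, and the `inputs` property.
# The class name is hypothetical; DummyTextInputGenerator and NormalizedTextConfig are
# assumed to come from optimum.utils.
from optimum.exporters.onnx import OnnxConfig
from optimum.utils import DummyTextInputGenerator, NormalizedTextConfig

class MyEncoderOnnxConfig(OnnxConfig):
    NORMALIZED_CONFIG_CLASS = NormalizedTextConfig
    DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator,)
    DEFAULT_ONNX_OPSET = 14

    @property
    def inputs(self):
        # Axis 0 is the dynamic batch dimension, axis 1 the dynamic sequence length.
        return {
            "input_ids": {0: "batch_size", 1: "sequence_length"},
            "attention_mask": {0: "batch_size", 1: "sequence_length"},
        }
# --- Editor's sketch (end); _create_dummy_input_generator_classes continues below. ---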
dummy_inputs_generators.insert(0, first_inputs_gen) return dummy_inputs_generators @property @abstractmethod def inputs(self) -> Dict[str, Dict[int, str]]: raise NotImplementedError() @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = self._TASK_TO_COMMON_OUTPUTS[self.task] return copy.deepcopy(common_outputs) @property def variant(self) -> str: return self._variant @variant.setter def variant(self, value: str): if value == 'default' and hasattr(self, 'DEFAULT_VARIANT'): value = self.DEFAULT_VARIANT if value not in self.VARIANTS: raise ValueError(f'The variant {value} is not supported for the ONNX config {self.__class__.__name__}.') self._variant = value def fix_dynamic_axes(self, model_path: 'Path', device: str='cpu', dtype: Optional[str]=None, input_shapes: Optional[Dict]=None): if not (is_onnx_available() and is_onnxruntime_available()): raise RuntimeError('The onnx and onnxruntime packages are necessary to fix the dynamic shapes of the exported model. You can install them by doing: pip install onnx onnxruntime') import onnx from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions allowed_dynamic_axes = set() for input_ in self.inputs.values(): allowed_dynamic_axes |= set(input_.values()) for output in self.outputs.values(): allowed_dynamic_axes |= set(output.values()) if device.startswith('cuda'): providers = ['CUDAExecutionProvider'] else: providers = ['CPUExecutionProvider'] session_options = SessionOptions() session_options.graph_optimization_level = GraphOptimizationLevel.ORT_DISABLE_ALL session = InferenceSession(model_path.as_posix(), providers=providers, sess_options=session_options) onnx_input_names = [inp.name for inp in session.get_inputs()] to_fix = [] for (output_idx, node) in enumerate(session.get_outputs()): for (idx, axis) in enumerate(node.shape): if isinstance(axis, str) and axis not in allowed_dynamic_axes: to_fix.append((output_idx, idx)) if to_fix: if input_shapes is None: input_shapes = {} dummy_inputs = self.generate_dummy_inputs(framework='np', **input_shapes) dummy_inputs = self.generate_dummy_inputs_for_validation(dummy_inputs, onnx_input_names=onnx_input_names) onnx_inputs = {} for (name, value) in dummy_inputs.items(): if isinstance(value, (list, tuple)): value = self.flatten_output_collection_property(name, value) onnx_inputs.update(dict(value.items())) else: onnx_inputs[name] = value for (name, value) in onnx_inputs.items(): if value.dtype == np.float32 and dtype == 'fp16': onnx_inputs[name] = onnx_inputs[name].astype(np.float16) outputs = session.run(None, onnx_inputs) del session onnx_model = onnx.load(model_path.as_posix(), load_external_data=False) for (output_idx, dim_idx) in to_fix: dims = onnx_model.graph.output[output_idx].type.tensor_type.shape.dim dims[dim_idx].dim_value = outputs[output_idx].shape[dim_idx] onnx.save(onnx_model, model_path.as_posix(), convert_attribute=True) del onnx_model gc.collect() def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> ModelPatcher: return ModelPatcher(self, model, model_kwargs=model_kwargs) @property def values_override(self) -> Optional[Dict[str, Any]]: if hasattr(self._config, 'use_cache'): return {'use_cache': False} return None @property def is_transformers_support_available(self) -> bool: return check_if_transformers_greater(self.MIN_TRANSFORMERS_VERSION) @property def is_torch_support_available(self) -> bool: if is_torch_available(): from ...utils import torch_version return 
torch_version >= self.MIN_TORCH_VERSION return False @property def torch_to_onnx_input_map(self) -> Dict[str, str]: return {} @property def torch_to_onnx_output_map(self) -> Dict[str, str]: return {} def rename_ambiguous_inputs(self, inputs) -> Dict[str, Dict[int, str]]: return inputs def ordered_inputs(self, model: Union['PreTrainedModel', 'TFPreTrainedModel']) -> Dict[str, Dict[int, str]]: inputs = self.inputs inputs = self.rename_ambiguous_inputs(inputs) ordered_inputs = {} if hasattr(model, 'forward'): sig = inspect.signature(model.forward) else: sig = inspect.signature(model.call) for param in sig.parameters: param_regex = re.compile(f'{param}(\\..*)?$') to_insert = [] for (name, dynamic_axes) in inputs.items(): if re.match(param_regex, name): to_insert.append((name, dynamic_axes)) for (name, dynamic_axes) in to_insert: name = self.torch_to_onnx_input_map.get(name, name) ordered_inputs[name] = dynamic_axes return ordered_inputs @add_dynamic_docstring(text=GENERATE_DUMMY_DOCSTRING, dynamic_elements=DEFAULT_DUMMY_SHAPES) def generate_dummy_inputs(self, framework: str='pt', **kwargs) -> Dict: dummy_inputs_generators = self._create_dummy_input_generator_classes(**kwargs) dummy_inputs = {} for input_name in self.inputs: input_was_inserted = False for dummy_input_gen in dummy_inputs_generators: if dummy_input_gen.supports_input(input_name): dummy_inputs[input_name] = dummy_input_gen.generate(input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype) input_was_inserted = True break if not input_was_inserted: raise RuntimeError(f'Could not generate dummy input for "{input_name}". Try adding a proper dummy input generator to the model ONNX config.') return dummy_inputs @classmethod def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]: if isinstance(field[0], (list, tuple)): return {f'{name}.{idx}': item for (idx, item) in enumerate(itertools.chain.from_iterable(field))} else: return {f'{name}.{idx}': item for (idx, item) in enumerate(field)} def generate_dummy_inputs_for_validation(self, reference_model_inputs: Dict[str, Any], onnx_input_names: Optional[List[str]]=None) -> Dict[str, Any]: return reference_model_inputs def post_process_exported_models(self, path: 'Path', models_and_onnx_configs: Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], 'OnnxConfig']], onnx_files_subpaths: List[str]): first_key = next(iter(models_and_onnx_configs)) if is_torch_available() and isinstance(models_and_onnx_configs[first_key][0], nn.Module): if is_accelerate_available(): logger.info('Deduplicating shared (tied) weights...') for (subpath, key) in zip(onnx_files_subpaths, models_and_onnx_configs): torch_model = models_and_onnx_configs[key][0] tied_params = find_tied_parameters(torch_model) if len(tied_params) > 0: onnx_model = onnx.load(os.path.join(path, subpath)) remove_duplicate_weights_from_tied_info(onnx_model, torch_model, tied_params, save_path=os.path.join(path, subpath)) else: logger.warning('Weight deduplication check in the ONNX export requires accelerate. 
Please install accelerate to run it.') return (models_and_onnx_configs, onnx_files_subpaths) class OnnxConfigWithPast(OnnxConfig, ABC): PAD_ATTENTION_MASK_TO_PAST: bool = False SUPPORTS_PAST: bool = True def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', use_past: bool=False, use_past_in_inputs: bool=False, preprocessors: Optional[List[Any]]=None, legacy: bool=False): self.use_past = use_past self.use_past_in_inputs = use_past_in_inputs self.is_merged = False self.use_cache_branch = None super().__init__(config=config, task=task, int_dtype=int_dtype, float_dtype=float_dtype, preprocessors=preprocessors, legacy=legacy) @property def outputs(self) -> Dict[str, Dict[int, str]]: if not self.use_past_in_inputs: common_outputs = super().outputs elif self.task == 'feature-extraction': common_outputs = OrderedDict({'last_hidden_state': {0: 'batch_size'}}) else: common_outputs = OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}) if self.use_past: self.add_past_key_values(common_outputs, direction='outputs') return common_outputs @property def values_override(self) -> Optional[Dict[str, Any]]: if hasattr(self._config, 'use_cache'): return {'use_cache': self.use_past} @add_dynamic_docstring(text=GENERATE_DUMMY_DOCSTRING, dynamic_elements=DEFAULT_DUMMY_SHAPES) def generate_dummy_inputs(self, framework: str='pt', **kwargs): dummy_inputs_generators = self._create_dummy_input_generator_classes(**kwargs) dummy_inputs = {} input_names = [key for key in self.inputs.keys() if not key.startswith('past_key_values')] if self.use_past_in_inputs and self.use_cache_branch is not False: input_names.append('past_key_values') for input_name in input_names: input_was_inserted = False for dummy_input_gen in dummy_inputs_generators: if dummy_input_gen.supports_input(input_name): dummy_inputs[input_name] = self.overwrite_shape_and_generate_input(dummy_input_gen, input_name, framework, input_shapes=kwargs) input_was_inserted = True break if not input_was_inserted: raise RuntimeError(f'Could not generate dummy input for "{input_name}". 
Try adding a proper dummy input generator to the model ONNX config.') if self.use_past_in_inputs and self.PAD_ATTENTION_MASK_TO_PAST and (self.use_cache_branch is not False) and ('attention_mask' in dummy_inputs): past_present_length = dummy_inputs['input_ids'].shape[1] + dummy_inputs['past_key_values'][0][1].shape[-2] dummy_inputs['attention_mask'] = DummyInputGenerator.pad_input_on_dim(dummy_inputs['attention_mask'], desired_length=past_present_length, dim=1, dtype=dummy_inputs['attention_mask'].dtype) if self.use_past_in_inputs and self.use_cache_branch is not False and ('decoder_attention_mask' in dummy_inputs): past_length = dummy_inputs['past_key_values'][0][0].shape[2] dummy_inputs['decoder_attention_mask'] = DummyInputGenerator.pad_input_on_dim(dummy_inputs['decoder_attention_mask'], desired_length=past_length + 1, dim=1, dtype=dummy_inputs['decoder_attention_mask'].dtype) return dummy_inputs def overwrite_shape_and_generate_input(self, dummy_input_gen: 'DummyInputGenerator', input_name: str, framework: str, input_shapes: Dict): if self.use_past and self.use_past_in_inputs and (self.use_cache_branch is not False) and (input_name in ['decoder_input_ids', 'input_ids', 'position_ids']) and (self.task == 'text-generation' and self.legacy or self.task != 'text-generation'): sequence_length = dummy_input_gen.sequence_length dummy_input_gen.sequence_length = 1 dummy_input = dummy_input_gen.generate(input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype) dummy_input_gen.sequence_length = sequence_length else: dummy_input = dummy_input_gen.generate(input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype) return dummy_input def add_past_key_values(self, inputs_or_outputs: Dict[str, Dict[int, str]], direction: str): if direction not in ['inputs', 'outputs']: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') if direction == 'inputs': decoder_sequence_name = 'past_sequence_length' name = 'past_key_values' else: decoder_sequence_name = 'past_sequence_length + 1' name = 'present' for i in range(self._normalized_config.num_layers): inputs_or_outputs[f'{name}.{i}.key'] = {0: 'batch_size', 2: decoder_sequence_name} inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch_size', 2: decoder_sequence_name} def flatten_past_key_values(self, flattened_output, name, idx, t): flattened_output[f'{name}.{idx}.key'] = t[0] flattened_output[f'{name}.{idx}.value'] = t[1] def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[str, Any]: flattened_output = {} if name in ['present', 'past_key_values']: for (idx, t) in enumerate(field): self.flatten_past_key_values(flattened_output, name, idx, t) else: flattened_output = super().flatten_output_collection_property(name, field) return flattened_output def generate_dummy_inputs_for_validation(self, reference_model_inputs: Dict[str, Any], onnx_input_names: Optional[List[str]]=None) -> Dict[str, Any]: if self.is_merged is True and self.use_cache_branch is True: reference_model_inputs['use_cache_branch'] = DummyInputGenerator.constant_tensor(shape=[1], value=True) elif self.is_merged is True and self.use_cache_branch is False: reference_model_inputs['use_cache_branch'] = DummyInputGenerator.constant_tensor(shape=[1], value=False) batch_size = reference_model_inputs['input_ids'].shape[0] pkv_generator = self.DUMMY_PKV_GENERATOR_CLASS(task=self.task, normalized_config=self._normalized_config, sequence_length=1, batch_size=batch_size) 
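# --- Editor's worked example (begin); derived from add_past_key_values above, not part of
# the original file. --- For a decoder with num_layers == 2, add_past_key_values(inputs,
# direction="inputs") extends `inputs` with one key/value entry per layer, dynamic on the
# batch dimension (axis 0) and on the past sequence length (axis 2):
expected_past_inputs = {
    "past_key_values.0.key": {0: "batch_size", 2: "past_sequence_length"},
    "past_key_values.0.value": {0: "batch_size", 2: "past_sequence_length"},
    "past_key_values.1.key": {0: "batch_size", 2: "past_sequence_length"},
    "past_key_values.1.value": {0: "batch_size", 2: "past_sequence_length"},
}
# --- Editor's worked example (end); generate_dummy_inputs_for_validation continues below. ---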
reference_model_inputs['past_key_values'] = pkv_generator.generate('past_key_values', framework='pt', int_dtype=self.int_dtype, float_dtype=self.float_dtype) return reference_model_inputs class ConfigBehavior(str, enum.Enum): MONOLITH = 'monolith' ENCODER = 'encoder' DECODER = 'decoder' class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast): DUMMY_PKV_GENERATOR_CLASS = DummySeq2SeqPastKeyValuesGenerator def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', use_past: bool=False, use_past_in_inputs: bool=False, behavior: ConfigBehavior=ConfigBehavior.MONOLITH, preprocessors: Optional[List[Any]]=None, legacy: bool=False): super().__init__(config=config, task=task, int_dtype=int_dtype, float_dtype=float_dtype, use_past=use_past, use_past_in_inputs=use_past_in_inputs, preprocessors=preprocessors, legacy=legacy) self._behavior = behavior if self._behavior is ConfigBehavior.ENCODER: self.task = 'feature-extraction' self.use_past_in_inputs = False def with_behavior(self, behavior: Union[str, ConfigBehavior], use_past: bool=False, use_past_in_inputs: bool=False) -> 'OnnxSeq2SeqConfigWithPast': if isinstance(behavior, str) and (not isinstance(behavior, ConfigBehavior)): behavior = ConfigBehavior(behavior) onnx_config = self.__class__(self._config, task=self.task, int_dtype=self.int_dtype, float_dtype=self.float_dtype, use_past=use_past, use_past_in_inputs=use_past_in_inputs, behavior=behavior, preprocessors=self._preprocessors, legacy=self.legacy) onnx_config.variant = self.variant return onnx_config @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = super(OnnxConfigWithPast, self).outputs for (name, axes_names) in common_outputs.items(): if self._behavior is ConfigBehavior.ENCODER or 'encoder' in name: sequence_name = 'encoder_sequence_length' else: sequence_name = 'decoder_sequence_length' new_axes_names = {} for (axis_idx, axis_name) in axes_names.items(): if 'sequence' in axis_name: if self.use_past_in_inputs is False or self.is_merged is True: new_axes_names[axis_idx] = sequence_name else: new_axes_names[axis_idx] = '1' else: new_axes_names[axis_idx] = axis_name common_outputs[name] = new_axes_names if self.use_past: self.add_past_key_values(common_outputs, direction='outputs') return common_outputs def add_past_key_values(self, inputs_or_outputs: Dict[str, Dict[int, str]], direction: str): if direction not in ['inputs', 'outputs']: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') if direction == 'inputs': decoder_sequence_name = 'past_decoder_sequence_length' name = 'past_key_values' else: decoder_sequence_name = 'past_decoder_sequence_length + 1' name = 'present' for i in range(self._normalized_config.decoder_num_layers): inputs_or_outputs[f'{name}.{i}.decoder.key'] = {0: 'batch_size', 2: decoder_sequence_name} inputs_or_outputs[f'{name}.{i}.decoder.value'] = {0: 'batch_size', 2: decoder_sequence_name} if self.is_merged is True or (self._behavior is ConfigBehavior.DECODER and (not self.use_past_in_inputs)) or direction == 'inputs': inputs_or_outputs[f'{name}.{i}.encoder.key'] = {0: 'batch_size', 2: 'encoder_sequence_length_out'} inputs_or_outputs[f'{name}.{i}.encoder.value'] = {0: 'batch_size', 2: 'encoder_sequence_length_out'} def flatten_past_key_values(self, flattened_output, name, idx, t): if len(t) not in [2, 4]: raise ValueError('past_key_values to flatten should be of length 2 (self-attention only) or 4 (self and cross attention).') 
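# --- Editor's worked example (begin); not part of the original file. --- A seq2seq
# past_key_values entry of length 4 holds (decoder key, decoder value, encoder key,
# encoder value); the flattening just below maps it to the ONNX tensor names, e.g. for
# layer 0 of the "present" outputs (string stand-ins replace the actual tensors here):
example_flattened = {
    "present.0.decoder.key": "t[0]",
    "present.0.decoder.value": "t[1]",
    "present.0.encoder.key": "t[2]",   # only when cross-attention kv are present (len(t) == 4)
    "present.0.encoder.value": "t[3]",
}
# --- Editor's worked example (end); flatten_past_key_values continues below. ---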
flattened_output[f'{name}.{idx}.decoder.key'] = t[0] flattened_output[f'{name}.{idx}.decoder.value'] = t[1] if len(t) == 4: flattened_output[f'{name}.{idx}.encoder.key'] = t[2] flattened_output[f'{name}.{idx}.encoder.value'] = t[3] def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> ModelPatcher: return Seq2SeqModelPatcher(self, model, model_kwargs=model_kwargs) def post_process_exported_models(self, path: Path, models_and_onnx_configs: Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], 'OnnxConfig']], onnx_files_subpaths: List[str]): (models_and_onnx_configs, onnx_files_subpaths) = super().post_process_exported_models(path, models_and_onnx_configs, onnx_files_subpaths) if len(onnx_files_subpaths) >= 3 and self.use_past is True: decoder_path = Path(path, onnx_files_subpaths[1]) decoder_with_past_path = Path(path, onnx_files_subpaths[2]) decoder_merged_path = Path(path, ONNX_DECODER_MERGED_NAME + '.onnx') try: merge_decoders(decoder=decoder_path, decoder_with_past=decoder_with_past_path, save_path=decoder_merged_path, strict=False) except Exception as e: raise Exception(f'Unable to merge decoders. Detailed error: {e}') encoder_path = onnx_files_subpaths[0] onnx_files_subpaths_new = [encoder_path, decoder_merged_path.name, decoder_merged_path.name] onnx_files_subpaths_new.extend(onnx_files_subpaths[3:]) models_and_onnx_configs[ONNX_DECODER_NAME][1].is_merged = True models_and_onnx_configs[ONNX_DECODER_NAME][1].use_cache_branch = False models_and_onnx_configs[ONNX_DECODER_NAME][1].use_past_in_inputs = True models_and_onnx_configs[ONNX_DECODER_WITH_PAST_NAME][1].use_cache_branch = True models_and_onnx_configs[ONNX_DECODER_WITH_PAST_NAME][1].is_merged = True else: onnx_files_subpaths_new = onnx_files_subpaths return (models_and_onnx_configs, onnx_files_subpaths_new) def generate_dummy_inputs_for_validation(self, reference_model_inputs: Dict[str, Any], onnx_input_names: Optional[List[str]]=None) -> Dict[str, Any]: if self._behavior is ConfigBehavior.DECODER: if 'decoder_input_ids' in reference_model_inputs: reference_model_inputs['input_ids'] = reference_model_inputs.pop('decoder_input_ids') if 'attention_mask' in reference_model_inputs: reference_model_inputs['encoder_attention_mask'] = reference_model_inputs.pop('attention_mask') if onnx_input_names is not None: if 'encoder_outputs' in reference_model_inputs: if 'encoder_hidden_states' in onnx_input_names: reference_model_inputs['encoder_hidden_states'] = reference_model_inputs.pop('encoder_outputs')[0] else: reference_model_inputs.pop('encoder_outputs') elif 'encoder_outputs' in reference_model_inputs: if self.use_past_in_inputs is False or self.is_merged: reference_model_inputs['encoder_hidden_states'] = reference_model_inputs.pop('encoder_outputs')[0] else: reference_model_inputs.pop('encoder_outputs') return super().generate_dummy_inputs_for_validation(reference_model_inputs) class OnnxConfigWithLoss(OnnxConfig, ABC): _tasks_to_extra_inputs = {'feature-extraction': {'labels': {0: 'batch_size'}}, 'fill-mask': {'labels': {0: 'batch_size', 1: 'sequence_length'}}, 'text-generation': {'labels': {0: 'batch_size', 1: 'sequence_length'}}, 'text-generation-with-past': {'labels': {0: 'batch_size'}}, 'text2text-generation': {'labels': {0: 'batch_size', 1: 'sequence_length'}}, 'text2text-generation-with-past': {'labels': {0: 'batch_size'}}, 'text-classification': {'labels': {0: 'batch_size'}}, 'token-classification': {'labels': {0: 
'batch_size', 1: 'sequence_length'}}, 'multiple-choice': {'labels': {0: 'batch_size'}}, 'question-answering': {'start_positions': {0: 'batch_size'}, 'end_positions': {0: 'batch_size'}}, 'image-classification': {'labels': {0: 'batch_size'}}} _tasks_to_extra_outputs = {'feature-extraction': OrderedDict({'loss': {}})} DUMMY_EXTRA_INPUT_GENERATOR_CLASSES = (DummyLabelsGenerator,) def __init__(self, config: OnnxConfig, int_dtype: str='int64', float_dtype: str='fp32', legacy: bool=False): self._onnx_config = config self.task = self._onnx_config.task self.int_dtype = int_dtype self.float_dtype = float_dtype self._normalized_config = self._onnx_config._normalized_config self.PATCHING_SPECS = self._onnx_config.PATCHING_SPECS self.variant = 'default' self.legacy = legacy @classmethod def from_onnx_config(cls, config: OnnxConfig) -> 'OnnxConfigWithLoss': return cls(config) @property def inputs(self) -> Dict[str, Dict[int, str]]: inputs = self._onnx_config.inputs inputs.update(self._tasks_to_extra_inputs[self.task]) return inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = self._onnx_config.outputs extra_outputs = self._tasks_to_extra_outputs['feature-extraction'] common_outputs.update(extra_outputs) for key in reversed(extra_outputs.keys()): common_outputs.move_to_end(key, last=False) return copy.deepcopy(common_outputs) def generate_dummy_inputs(self, framework: str='pt', **kwargs): dummy_inputs = self._onnx_config.generate_dummy_inputs(framework=framework, **kwargs) (input_name, _) = next(iter(self._onnx_config.inputs.items())) batch_size = dummy_inputs[input_name].shape[0] if isinstance(self._onnx_config, OnnxConfigWithPast) and self._onnx_config.use_past_in_inputs is True and (self.task != 'text-generation'): kwargs['sequence_length'] = 1 else: for (input_name, dynamic_axes) in self._tasks_to_extra_inputs[self.task].items(): if 'sequence_length' in dynamic_axes.values(): kwargs['sequence_length'] = DEFAULT_DUMMY_SHAPES['sequence_length'] kwargs['num_labels'] = self._onnx_config._config.num_labels dummy_inputs_generators = [cls_(self.task, self._normalized_config, batch_size=batch_size, **kwargs) for cls_ in self.DUMMY_EXTRA_INPUT_GENERATOR_CLASSES] for input_name in self._tasks_to_extra_inputs[self.task]: input_was_inserted = False for dummy_input_gen in dummy_inputs_generators: if dummy_input_gen.supports_input(input_name): dummy_inputs[input_name] = dummy_input_gen.generate(input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype) input_was_inserted = True break if not input_was_inserted: raise RuntimeError(f'Could not generate dummy input for "{input_name}". 
Try adding a proper dummy input generator to the model ONNX config.') return dummy_inputs def generate_dummy_inputs_for_validation(self, reference_model_inputs: Dict[str, Any], onnx_input_names: Optional[List[str]]=None) -> Dict[str, Any]: return self._onnx_config.generate_dummy_inputs_for_validation(reference_model_inputs) def flatten_decoder_past_key_values(self, flattened_output, name, idx, t): flattened_output[f'{name}.{idx}.key'] = t[0] flattened_output[f'{name}.{idx}.value'] = t[1] def flatten_seq2seq_past_key_values(self, flattened_output, name, idx, t): if len(t) not in [2, 4]: raise ValueError('past_key_values to flatten should be of length 2 (self-attention only) or 4 (self and cross attention).') if len(t) == 2: flattened_output[f'{name}.{idx}.decoder.key'] = t[0] flattened_output[f'{name}.{idx}.decoder.value'] = t[1] if len(t) == 4: flattened_output[f'{name}.{idx}.encoder.key'] = t[2] flattened_output[f'{name}.{idx}.encoder.value'] = t[3] def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[str, Any]: flattened_output = {} if name in ['present', 'past_key_values']: if 'text-generation' in self.task: for (idx, t) in enumerate(field): self.flatten_decoder_past_key_values(flattened_output, name, idx, t) elif 'text2text-generation' in self.task: for (idx, t) in enumerate(field): self.flatten_seq2seq_past_key_values(flattened_output, name, idx, t) else: flattened_output = super().flatten_output_collection_property(name, field) return flattened_output @property def torch_to_onnx_input_map(self) -> Dict[str, str]: return self._onnx_config.torch_to_onnx_input_map @property def torch_to_onnx_output_map(self) -> Dict[str, str]: return self._onnx_config.torch_to_onnx_output_map @property def values_override(self) -> Optional[Dict[str, Any]]: return self._onnx_config.values_override # File: optimum-main/optimum/exporters/onnx/config.py """""" from collections import OrderedDict from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union from transformers.utils import is_tf_available from ...onnx import merge_decoders from ...utils import DummyAudioInputGenerator, DummyBboxInputGenerator, DummyInputGenerator, DummyPastKeyValuesGenerator, DummySeq2SeqDecoderTextInputGenerator, DummySeq2SeqPastKeyValuesGenerator, DummyTextInputGenerator, DummyVisionInputGenerator, is_diffusers_available, logging from .base import ConfigBehavior, OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from .constants import ONNX_DECODER_MERGED_NAME, ONNX_DECODER_NAME, ONNX_DECODER_WITH_PAST_NAME from .model_patcher import DecoderModelPatcher if TYPE_CHECKING: from transformers import PretrainedConfig, PreTrainedModel from .model_patcher import ModelPatcher if is_tf_available(): from transformers import TFPreTrainedModel if is_diffusers_available(): from diffusers import ModelMixin logger = logging.get_logger(__name__) class TextEncoderOnnxConfig(OnnxConfig): DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator,) class TextDecoderOnnxConfig(OnnxConfigWithPast): PAD_ATTENTION_MASK_TO_PAST = True DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, DummyPastKeyValuesGenerator) DUMMY_PKV_GENERATOR_CLASS = DummyPastKeyValuesGenerator def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', use_past: bool=False, use_past_in_inputs: bool=False, preprocessors: Optional[List[Any]]=None, legacy: bool=False): super().__init__(config=config, task=task, 
int_dtype=int_dtype, float_dtype=float_dtype, use_past=use_past, use_past_in_inputs=use_past_in_inputs, preprocessors=preprocessors, legacy=legacy) @property def inputs(self) -> Dict[str, Dict[int, str]]: if self.use_past_in_inputs: common_inputs = {'input_ids': {0: 'batch_size', 1: 'sequence_length'}} self.add_past_key_values(common_inputs, direction='inputs') common_inputs['attention_mask'] = {0: 'batch_size', 1: 'past_sequence_length + 1'} else: common_inputs = {'input_ids': {0: 'batch_size', 1: 'sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'sequence_length'}} return common_inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: if self.is_merged is False: common_outputs = super().outputs else: common_outputs = OrderedDict({'logits': {0: 'batch_size', 1: 'sequence_length'}}) self.add_past_key_values(common_outputs, direction='outputs') return common_outputs def post_process_exported_models(self, path: Path, models_and_onnx_configs: Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], 'OnnxConfig']], onnx_files_subpaths: List[str]): (models_and_onnx_configs, onnx_files_subpaths) = super().post_process_exported_models(path, models_and_onnx_configs, onnx_files_subpaths) if self.use_past is True and len(models_and_onnx_configs) == 2: decoder_path = Path(path, onnx_files_subpaths[0]) decoder_with_past_path = Path(path, onnx_files_subpaths[1]) decoder_merged_path = Path(path, ONNX_DECODER_MERGED_NAME + '.onnx') try: merge_decoders(decoder=decoder_path, decoder_with_past=decoder_with_past_path, save_path=decoder_merged_path) except Exception as e: raise Exception(f'Unable to merge decoders. Detailed error: {e}') onnx_files_subpaths = [decoder_merged_path.name, decoder_merged_path.name] models_and_onnx_configs[ONNX_DECODER_NAME][1].is_merged = True models_and_onnx_configs[ONNX_DECODER_NAME][1].use_cache_branch = False models_and_onnx_configs[ONNX_DECODER_NAME][1].use_past_in_inputs = True models_and_onnx_configs[ONNX_DECODER_WITH_PAST_NAME][1].use_cache_branch = True models_and_onnx_configs[ONNX_DECODER_WITH_PAST_NAME][1].is_merged = True return (models_and_onnx_configs, onnx_files_subpaths) def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return DecoderModelPatcher(self, model, model_kwargs=model_kwargs) class TextDecoderWithPositionIdsOnnxConfig(TextDecoderOnnxConfig): @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = super().inputs if not self.legacy and self.task in ['text-generation', 'feature-extraction']: common_inputs['position_ids'] = {0: 'batch_size', 1: 'sequence_length'} return common_inputs class TextSeq2SeqOnnxConfig(OnnxSeq2SeqConfigWithPast): DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, DummySeq2SeqDecoderTextInputGenerator, DummySeq2SeqPastKeyValuesGenerator) @property def torch_to_onnx_input_map(self) -> Dict[str, str]: if self._behavior is ConfigBehavior.DECODER: return {'decoder_input_ids': 'input_ids', 'encoder_outputs': 'encoder_hidden_states', 'attention_mask': 'encoder_attention_mask'} return {} @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = {} if self._behavior is not ConfigBehavior.DECODER: common_inputs['input_ids'] = {0: 'batch_size', 1: 'encoder_sequence_length'} common_inputs['attention_mask'] = {0: 'batch_size', 1: 'encoder_sequence_length'} if self._behavior is not ConfigBehavior.ENCODER: if self.use_past_in_inputs: common_inputs['decoder_input_ids'] = {0: 
'batch_size'} self.add_past_key_values(common_inputs, direction='inputs') else: common_inputs['decoder_input_ids'] = {0: 'batch_size', 1: 'decoder_sequence_length'} if self._behavior is ConfigBehavior.DECODER: common_inputs['encoder_outputs'] = {0: 'batch_size', 1: 'encoder_sequence_length'} return common_inputs def _create_dummy_input_generator_classes(self, **kwargs) -> List['DummyInputGenerator']: dummy_text_input_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[0](self.task, self._normalized_config, **kwargs) dummy_decoder_text_input_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[1](self.task, self._normalized_config, **kwargs) dummy_seq2seq_past_key_values_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[2](self.task, self._normalized_config, encoder_sequence_length=dummy_text_input_generator.sequence_length, **kwargs) dummy_inputs_generators = [dummy_text_input_generator, dummy_decoder_text_input_generator, dummy_seq2seq_past_key_values_generator] return dummy_inputs_generators class VisionOnnxConfig(OnnxConfig): DUMMY_INPUT_GENERATOR_CLASSES = (DummyVisionInputGenerator,) class TextAndVisionOnnxConfig(OnnxConfig): DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, DummyVisionInputGenerator, DummyBboxInputGenerator) class AudioOnnxConfig(OnnxConfig): DUMMY_INPUT_GENERATOR_CLASSES = (DummyAudioInputGenerator,) @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'input_values': {0: 'batch_size', 1: 'sequence_length'}} class AudioToTextOnnxConfig(OnnxSeq2SeqConfigWithPast): DUMMY_INPUT_GENERATOR_CLASSES = (DummyAudioInputGenerator, DummySeq2SeqDecoderTextInputGenerator, DummySeq2SeqPastKeyValuesGenerator) @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = {} if self._behavior is not ConfigBehavior.DECODER: common_inputs['input_features'] = {0: 'batch_size', 1: 'feature_size', 2: 'encoder_sequence_length'} if self._behavior is not ConfigBehavior.ENCODER: if self.use_past_in_inputs: common_inputs['decoder_input_ids'] = {0: 'batch_size'} self.add_past_key_values(common_inputs, direction='inputs') else: common_inputs['decoder_input_ids'] = {0: 'batch_size', 1: 'decoder_sequence_length'} if self._behavior is ConfigBehavior.DECODER: common_inputs['encoder_outputs'] = {0: 'batch_size', 1: 'encoder_sequence_length'} return common_inputs @property def torch_to_onnx_input_map(self) -> Dict[str, str]: if self._behavior is ConfigBehavior.DECODER: return {'decoder_input_ids': 'input_ids', 'encoder_outputs': 'encoder_hidden_states', 'attention_mask': 'encoder_attention_mask'} return {} class EncoderDecoderBaseOnnxConfig(OnnxSeq2SeqConfigWithPast): DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator,) def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', use_past: bool=False, use_past_in_inputs: bool=False, behavior: ConfigBehavior=ConfigBehavior.MONOLITH, preprocessors: Optional[List[Any]]=None, legacy: bool=False): super().__init__(config=config, task=task, int_dtype=int_dtype, float_dtype=float_dtype, use_past=use_past, use_past_in_inputs=use_past_in_inputs, behavior=behavior, preprocessors=preprocessors, legacy=legacy) from ..tasks import TasksManager self.is_decoder_with_past = False encoder_onnx_config_constructor = TasksManager.get_exporter_config_constructor(exporter='onnx', task='feature-extraction', model_type=config.encoder.model_type, library_name='transformers') self._encoder_onnx_config = encoder_onnx_config_constructor(config.encoder, int_dtype=int_dtype, 
float_dtype=float_dtype, preprocessors=preprocessors) self._normalized_config.ENCODER_NORMALIZED_CONFIG_CLASS = self._encoder_onnx_config._normalized_config decoder_onnx_config_constructor = TasksManager.get_exporter_config_constructor(exporter='onnx', task='feature-extraction', model_type=config.decoder.model_type, library_name='transformers') kwargs = {} if issubclass(decoder_onnx_config_constructor.func, OnnxConfigWithPast): self.is_decoder_with_past = True kwargs['use_past'] = use_past else: self.use_past = False if use_past and (not self.is_decoder_with_past): raise ValueError(f'The decoder part of the encoder-decoder model is {config.decoder.model_type} which does not need past key values.') self._decoder_onnx_config = decoder_onnx_config_constructor(config.decoder, int_dtype=int_dtype, float_dtype=float_dtype, preprocessors=preprocessors, **kwargs) if issubclass(decoder_onnx_config_constructor.func, OnnxSeq2SeqConfigWithPast): self._decoder_onnx_config = self._decoder_onnx_config.with_behavior(self._behavior, use_past=kwargs['use_past'], use_past_in_inputs=use_past_in_inputs) self._normalized_config.DECODER_NORMALIZED_CONFIG_CLASS = self._decoder_onnx_config._normalized_config self._normalized_config.DECODER_NORMALIZED_CONFIG_CLASS = self._decoder_onnx_config._normalized_config self._normalized_config.DECODER_NORMALIZED_CONFIG_CLASS.encoder_num_attention_heads = self._decoder_onnx_config._normalized_config.num_attention_heads self._normalized_config.DECODER_NORMALIZED_CONFIG_CLASS.decoder_num_attention_heads = self._decoder_onnx_config._normalized_config.num_attention_heads if isinstance(self._decoder_onnx_config, OnnxSeq2SeqConfigWithPast): self._past_key_values_generator = (DummySeq2SeqDecoderTextInputGenerator, DummySeq2SeqPastKeyValuesGenerator) else: self._past_key_values_generator = (DummySeq2SeqDecoderTextInputGenerator, DummyPastKeyValuesGenerator) self.DUMMY_INPUT_GENERATOR_CLASSES += self._past_key_values_generator @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = {} if self._behavior is not ConfigBehavior.DECODER: common_inputs['input_ids'] = {0: 'batch_size', 1: 'encoder_sequence_length'} common_inputs['attention_mask'] = {0: 'batch_size', 1: 'encoder_sequence_length'} if self._behavior is not ConfigBehavior.ENCODER: common_inputs.pop('attention_mask') if self.use_past_in_inputs: common_inputs['decoder_input_ids'] = {0: 'batch_size'} else: common_inputs['decoder_input_ids'] = {0: 'batch_size', 1: 'decoder_sequence_length'} if self.use_past_in_inputs: self.add_past_key_values(common_inputs, direction='inputs') if self._behavior is ConfigBehavior.DECODER: common_inputs['encoder_outputs'] = {0: 'batch_size', 1: 'encoder_sequence_length'} return common_inputs @property def torch_to_onnx_input_map(self) -> Dict[str, str]: if self._behavior is ConfigBehavior.DECODER: return {'decoder_input_ids': 'input_ids', 'encoder_outputs': 'encoder_hidden_states', 'attention_mask': 'encoder_attention_mask'} return {} def add_past_key_values(self, inputs_or_outputs: Dict[str, Dict[int, str]], direction: str): if self.is_decoder_with_past: return self._decoder_onnx_config.add_past_key_values(inputs_or_outputs, direction) def flatten_past_key_values(self, flattened_output, name, idx, t): if self.is_decoder_with_past: return self._decoder_onnx_config.flatten_past_key_values(flattened_output, name, idx, t) def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[str, Any]: return 
self._decoder_onnx_config.flatten_output_collection_property(name, field) def generate_dummy_inputs_for_validation(self, reference_model_inputs: Dict[str, Any], onnx_input_names: Optional[List[str]]=None) -> Dict[str, Any]: if self._behavior is ConfigBehavior.ENCODER: return self._encoder_onnx_config.generate_dummy_inputs_for_validation(reference_model_inputs) else: if self._behavior is ConfigBehavior.DECODER: reference_model_inputs['input_ids'] = reference_model_inputs.pop('decoder_input_ids') if 'encoder_outputs' in reference_model_inputs: if 'encoder_hidden_states' in onnx_input_names: reference_model_inputs['encoder_hidden_states'] = reference_model_inputs.pop('encoder_outputs')[0] else: reference_model_inputs.pop('encoder_outputs') return self._decoder_onnx_config.generate_dummy_inputs_for_validation(reference_model_inputs) def post_process_exported_models(self, path: Path, models_and_onnx_configs: Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], 'OnnxConfig']], onnx_files_subpaths: List[str]): (models_and_onnx_configs, onnx_files_subpaths) = super().post_process_exported_models(path, models_and_onnx_configs, onnx_files_subpaths) if self.use_past is True and len(models_and_onnx_configs) == 3: models_and_onnx_configs[ONNX_DECODER_NAME][1]._decoder_onnx_config.is_merged = True models_and_onnx_configs[ONNX_DECODER_NAME][1]._decoder_onnx_config.use_cache_branch = False models_and_onnx_configs[ONNX_DECODER_NAME][1]._decoder_onnx_config.use_past_in_inputs = True models_and_onnx_configs[ONNX_DECODER_WITH_PAST_NAME][1]._decoder_onnx_config.use_cache_branch = True models_and_onnx_configs[ONNX_DECODER_WITH_PAST_NAME][1]._decoder_onnx_config.is_merged = True return (models_and_onnx_configs, onnx_files_subpaths) # File: optimum-main/optimum/exporters/onnx/constants.py EXTERNAL_DATA_FORMAT_SIZE_LIMIT = 2 * 1024 * 1024 * 1024 ONNX_ENCODER_NAME = 'encoder_model' ONNX_DECODER_NAME = 'decoder_model' ONNX_DECODER_WITH_PAST_NAME = 'decoder_with_past_model' ONNX_DECODER_MERGED_NAME = 'decoder_model_merged' UNPICKABLE_ARCHS = ['encodec', 'hubert', 'sew', 'sew-d', 'speecht5', 'unispeech', 'unispeech-sat', 'wav2vec2', 'wav2vec2-conformer', 'wavlm'] SDPA_ARCHS_ONNX_EXPORT_NOT_SUPPORTED = ['bart', 'musicgen', 'whisper'] # File: optimum-main/optimum/exporters/onnx/convert.py """""" import copy import gc import multiprocessing as mp import os import traceback from inspect import signature from itertools import chain from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np import onnx from transformers.modeling_utils import get_parameter_dtype from transformers.utils import is_tf_available, is_torch_available from ...onnx.utils import _get_onnx_external_constants, _get_onnx_external_data_tensors, check_model_uses_external_data from ...utils import DEFAULT_DUMMY_SHAPES, ONNX_WEIGHTS_NAME, TORCH_MINIMUM_VERSION, is_diffusers_available, is_torch_onnx_support_available, logging, require_numpy_strictly_lower from ...utils.modeling_utils import MODEL_TO_PATCH_FOR_PAST from ...utils.save_utils import maybe_save_preprocessors from ..error_utils import AtolError, MinimumVersionError, OutputMatchError, ShapeError from ..tasks import TasksManager from .base import OnnxConfig from .constants import UNPICKABLE_ARCHS from .model_configs import SpeechT5OnnxConfig from .utils import MODEL_TYPES_REQUIRING_POSITION_IDS, PickableInferenceSession, _get_submodels_and_onnx_configs, recursive_to_device if is_torch_available(): import torch 
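# --- Illustrative sketch (not part of the library) ---------------------------------------
# A minimal example of what the decoder post-processing above achieves: the
# `decoder_model.onnx` / `decoder_with_past_model.onnx` pair produced by the export is fused
# into a single `decoder_model_merged.onnx` with `optimum.onnx.merge_decoders`. The
# `export_dir` value is a placeholder and is assumed to already contain the two decoder files.
def _merge_decoders_sketch(export_dir: str = "exported_model") -> None:
    from pathlib import Path

    from optimum.exporters.onnx.constants import (
        ONNX_DECODER_MERGED_NAME,
        ONNX_DECODER_NAME,
        ONNX_DECODER_WITH_PAST_NAME,
    )
    from optimum.onnx import merge_decoders

    export_path = Path(export_dir)
    # Same call as in post_process_exported_models above, outside of the export pipeline.
    merge_decoders(
        decoder=export_path / (ONNX_DECODER_NAME + ".onnx"),
        decoder_with_past=export_path / (ONNX_DECODER_WITH_PAST_NAME + ".onnx"),
        save_path=export_path / (ONNX_DECODER_MERGED_NAME + ".onnx"),
    )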
import torch.nn as nn from transformers.modeling_utils import PreTrainedModel if is_diffusers_available(): from diffusers import DiffusionPipeline, ModelMixin if is_tf_available(): from transformers.modeling_tf_utils import TFPreTrainedModel logger = logging.get_logger(__name__) class DynamicAxisNameError(ValueError): pass def check_dummy_inputs_are_allowed(model: Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], dummy_input_names: Iterable[str]): forward = model.forward if is_torch_available() and isinstance(model, nn.Module) else model.call forward_parameters = signature(forward).parameters forward_inputs_set = set(forward_parameters.keys()) dummy_input_names = set(dummy_input_names) if not dummy_input_names.issubset(forward_inputs_set): raise ValueError(f'Config dummy inputs are not a subset of the model inputs: {dummy_input_names} vs {forward_inputs_set}') def validate_models_outputs(models_and_onnx_configs: Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], 'OnnxConfig']], onnx_named_outputs: List[List[str]], output_dir: Path, atol: Optional[float]=None, onnx_files_subpaths: Optional[List[str]]=None, input_shapes: Optional[Dict]=None, device: str='cpu', use_subprocess: Optional[bool]=True, model_kwargs: Optional[Dict[str, Any]]=None): if len(onnx_named_outputs) != len(models_and_onnx_configs.keys()): raise ValueError(f'Invalid number of ONNX named outputs. Required {len(models_and_onnx_configs.keys())}, Provided {len(onnx_named_outputs)}') if onnx_files_subpaths is not None and len(onnx_files_subpaths) != len(models_and_onnx_configs): raise ValueError(f'Provided custom names {onnx_files_subpaths} for the validation of {len(models_and_onnx_configs)} models. Please provide the same number of ONNX file names as models to export.') if use_subprocess: logger.info('Validating models in subprocesses...') exceptions = [] for (i, model_name) in enumerate(models_and_onnx_configs.keys()): (submodel, sub_onnx_config) = models_and_onnx_configs[model_name] onnx_model_path = output_dir.joinpath(onnx_files_subpaths[i]) if onnx_files_subpaths is not None else output_dir.joinpath(model_name + '.onnx') try: validate_model_outputs(config=sub_onnx_config, reference_model=submodel, onnx_model=onnx_model_path, onnx_named_outputs=onnx_named_outputs[i], atol=atol, input_shapes=input_shapes, device=device, use_subprocess=use_subprocess, model_kwargs=model_kwargs) except Exception as e: exceptions.append((onnx_model_path, e)) if len(exceptions) != 0: for (i, exception) in enumerate(exceptions[:-1]): logger.error(f'Validation for the model {exception[0].as_posix()} raised: {exception[1]}') raise exceptions[-1][1] def validate_model_outputs(config: OnnxConfig, reference_model: Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], onnx_model: Path, onnx_named_outputs: List[str], atol: Optional[float]=None, input_shapes: Optional[Dict]=None, device: str='cpu', use_subprocess: Optional[bool]=True, model_kwargs: Optional[Dict[str, Any]]=None): if use_subprocess: mp.set_start_method('spawn', force=True) io_process = ValidationProcess(config, reference_model, onnx_model, onnx_named_outputs, atol, input_shapes, device, model_kwargs) io_process.start() io_process.join() if io_process.exception: (error, traceback) = io_process.exception raise error else: _run_validation(config, reference_model, onnx_model, onnx_named_outputs, atol, input_shapes, device, model_kwargs=model_kwargs) def _run_validation(config: OnnxConfig, reference_model: Union['PreTrainedModel', 
'TFPreTrainedModel', 'ModelMixin'], onnx_model: Path, onnx_named_outputs: List[str], atol: Optional[float]=None, input_shapes: Optional[Dict]=None, device: str='cpu', model_kwargs: Optional[Dict[str, Any]]=None): from onnxruntime import GraphOptimizationLevel, SessionOptions model_kwargs = model_kwargs if model_kwargs is not None else {} logger.info(f'\nValidating ONNX model {onnx_model.as_posix()}...') if atol is None: atol = config.ATOL_FOR_VALIDATION if 'diffusers' in str(reference_model.__class__) and (not is_diffusers_available()): raise ImportError('The pip package `diffusers` is required to validate diffusion ONNX models.') framework = 'pt' if is_torch_available() and isinstance(reference_model, nn.Module) else 'tf' if input_shapes is None: input_shapes = {} reference_model_inputs = config.generate_dummy_inputs(framework=framework, **input_shapes) session_options = SessionOptions() session_options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_BASIC if device.startswith('cuda'): provider = 'CUDAExecutionProvider' else: provider = 'CPUExecutionProvider' session = PickableInferenceSession(onnx_model.as_posix(), sess_options=session_options, providers=[provider]) all_onnx_outputs = {output.name for output in session.get_outputs()} config_outputs = set(config.outputs) if all_onnx_outputs != config_outputs: if len(all_onnx_outputs) > len(config_outputs): diff = all_onnx_outputs - config_outputs else: diff = config_outputs - all_onnx_outputs raise OutputMatchError(f"The exported ONNX model does not have the exact same outputs as what is provided in {config.__class__.__name__}. Difference: {', '.join(diff)}") all_config_dynamic_axes_names = set() for input_ in config.inputs.values(): all_config_dynamic_axes_names |= set(input_.values()) for output in config.outputs.values(): all_config_dynamic_axes_names |= set(output.values()) for node in session.get_outputs(): for (idx, axis) in enumerate(node.shape): if isinstance(axis, str) and axis not in all_config_dynamic_axes_names: raise DynamicAxisNameError(f'The axis {idx} of input / output node called {node.name} has an unknown name: {axis}') if is_torch_available() and isinstance(reference_model, nn.Module): reference_model.to(device) for (key, value) in reference_model_inputs.items(): reference_model_inputs[key] = recursive_to_device(value=value, device=device) copy_reference_model_inputs = copy.deepcopy(reference_model_inputs) copy_reference_model_inputs = config.rename_ambiguous_inputs(copy_reference_model_inputs) with config.patch_model_for_export(reference_model, model_kwargs=model_kwargs): if is_torch_available() and isinstance(reference_model, nn.Module): with torch.inference_mode(): ref_outputs = reference_model(**copy_reference_model_inputs) else: ref_outputs = reference_model(**copy_reference_model_inputs) ref_outputs_dict = {} for (name, value) in ref_outputs.items(): if name == 'past_key_values': name = 'present' if isinstance(value, (list, tuple)): onnx_output_name = config.torch_to_onnx_output_map.get(name, name) value = config.flatten_output_collection_property(onnx_output_name, value) ref_outputs_dict.update(value) else: ref_outputs_dict[name] = value onnx_input_names = [inp.name for inp in session.get_inputs()] reference_ort_inputs = config.generate_dummy_inputs_for_validation(reference_model_inputs, onnx_input_names=onnx_input_names) onnx_inputs = {} for (name, value) in reference_ort_inputs.items(): if isinstance(value, (list, tuple)): value = config.flatten_output_collection_property(name, value) 
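# --- Illustrative sketch (not part of the library) ---------------------------------------
# The core numeric check that _run_validation performs: run the exported graph with
# ONNX Runtime and compare each output to the reference framework output within an absolute
# tolerance. The argument names below are placeholders for this sketch; reference outputs
# are assumed to be numpy arrays in the same order as the session outputs.
def _compare_outputs_sketch(onnx_path, onnx_inputs, reference_outputs, atol=1e-5):
    import numpy as np
    import onnxruntime

    session = onnxruntime.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
    onnx_outputs = session.run(None, onnx_inputs)  # None -> fetch all graph outputs
    for ref_value, ort_value in zip(reference_outputs, onnx_outputs):
        if ref_value.shape != ort_value.shape:
            raise ValueError(f"Shape mismatch: {ref_value.shape} (reference) vs {ort_value.shape} (ONNX)")
        if not np.allclose(ref_value, ort_value, atol=atol):
            max_diff = np.amax(np.abs(ref_value - ort_value))
            raise ValueError(f"Values not close enough, max diff: {max_diff} (atol: {atol})")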
onnx_inputs.update({tensor_name: pt_tensor.cpu().numpy() for (tensor_name, pt_tensor) in value.items()}) elif isinstance(value, dict): onnx_inputs.update({tensor_name: pt_tensor.cpu().numpy() for (tensor_name, pt_tensor) in value.items()}) else: onnx_inputs[name] = value.cpu().numpy() onnx_outputs = session.run(onnx_named_outputs, onnx_inputs) onnx_to_torch = {v: k for (k, v) in config.torch_to_onnx_output_map.items()} onnx_named_outputs = [onnx_to_torch.get(k, k) for k in onnx_named_outputs] (ref_outputs_set, onnx_outputs_set) = (set(ref_outputs_dict.keys()), set(onnx_named_outputs)) if not onnx_outputs_set.issubset(ref_outputs_set): raise OutputMatchError(f'ONNX model output names do not match reference model output names.\nReference model output names: {ref_outputs_set}\nONNX model output names: {onnx_outputs_set}\nDifference: {onnx_outputs_set.difference(ref_outputs_set)}') else: onnx_output_names = ', '.join(onnx_outputs_set) logger.info(f'\t-[✓] ONNX model output names match reference model ({onnx_output_names})') if 'diffusers' in str(reference_model.__class__) and (not is_diffusers_available()): raise ImportError('The pip package `diffusers` is required to validate diffusion ONNX models.') shape_failures = [] value_failures = [] for (name, ort_value) in zip(onnx_named_outputs, onnx_outputs): if is_torch_available() and isinstance(reference_model, nn.Module): ref_value = ref_outputs_dict[name].detach().cpu().numpy() else: ref_value = ref_outputs_dict[name].cpu().numpy() logger.info(f'\t- Validating ONNX Model output "{name}":') if not ort_value.shape == ref_value.shape: logger.error(f"\t\t-[x] shape {ort_value.shape} doesn't match {ref_value.shape}") shape_failures.append((name, ref_value.shape, ort_value.shape)) else: logger.info(f'\t\t-[✓] {ort_value.shape} matches {ref_value.shape}') try: if not np.allclose(ref_value, ort_value, atol=atol): max_diff = np.amax(np.abs(ref_value - ort_value)) logger.error(f'\t\t-[x] values not close enough, max diff: {max_diff} (atol: {atol})') value_failures.append((name, max_diff)) else: logger.info(f'\t\t-[✓] all values close (atol: {atol})') except Exception: pass if shape_failures: msg = '\n'.join((f'- {t[0]}: got {t[1]} (reference) and {t[2]} (ONNX)' for t in shape_failures)) raise ShapeError(f'Output shapes do not match between reference model and ONNX exported model:\n{msg}') if value_failures: msg = '\n'.join((f'- {t[0]}: max diff = {t[1]}' for t in value_failures)) atol_msg = f'The maximum absolute difference between the output of the reference model and the ONNX exported model is not within the set tolerance {atol}:\n{msg}' if isinstance(config, SpeechT5OnnxConfig): atol_msg += '\nIMPORTANT NOTE: SpeechT5 uses a dropout at inference and the output validation of ONNX Runtime inference vs PyTorch is expected to fail. 
Reference: https://github.com/huggingface/transformers/blob/v4.33.2/src/transformers/models/speecht5/modeling_speecht5.py#L727' raise AtolError(atol_msg) class ValidationProcess(mp.Process): def __init__(self, config: OnnxConfig, reference_model: Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], onnx_model: Path, onnx_named_outputs: List[str], atol: Optional[float]=None, input_shapes: Optional[Dict]=None, device: str='cpu', model_kwargs: Optional[Dict[str, Any]]=None): super().__init__() (self._pconn, self._cconn) = mp.Pipe() self._exception = None self.config = config self.reference_model = reference_model self.onnx_model = onnx_model self.onnx_named_outputs = onnx_named_outputs self.atol = atol self.input_shapes = input_shapes self.device = device self.model_kwargs = model_kwargs def run(self): try: _run_validation(config=self.config, reference_model=self.reference_model, onnx_model=self.onnx_model, onnx_named_outputs=self.onnx_named_outputs, atol=self.atol, input_shapes=self.input_shapes, device=self.device, model_kwargs=self.model_kwargs) except Exception as e: tb = traceback.format_exc() self._cconn.send((e, tb)) return @property def exception(self): if self._pconn.poll(): self._exception = self._pconn.recv() return self._exception def export_pytorch(model: Union['PreTrainedModel', 'ModelMixin'], config: OnnxConfig, opset: int, output: Path, device: str='cpu', input_shapes: Optional[Dict]=None, no_dynamic_axes: bool=False, do_constant_folding: bool=True, model_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[List[str], List[str]]: from torch.onnx import export as onnx_export from torch.utils._pytree import tree_map logger.info(f'Using framework PyTorch: {torch.__version__}') FORCE_ONNX_EXTERNAL_DATA = os.getenv('FORCE_ONNX_EXTERNAL_DATA', '0') == '1' with torch.no_grad(): model.config.return_dict = True model = model.eval() if config.values_override is not None: logger.info(f'Overriding {len(config.values_override)} configuration item(s)') for (override_config_key, override_config_value) in config.values_override.items(): logger.info(f'\t- {override_config_key} -> {override_config_value}') setattr(model.config, override_config_key, override_config_value) if input_shapes is None: input_shapes = {} dummy_inputs = config.generate_dummy_inputs(framework='pt', **input_shapes) device = torch.device(device) def remap(value): if isinstance(value, torch.Tensor): value = value.to(device) return value if device.type == 'cuda' and torch.cuda.is_available(): model.to(device) dummy_inputs = tree_map(remap, dummy_inputs) dummy_inputs = config.rename_ambiguous_inputs(dummy_inputs) with config.patch_model_for_export(model, model_kwargs=model_kwargs): check_dummy_inputs_are_allowed(model, dummy_inputs) inputs = config.ordered_inputs(model) input_names = list(inputs.keys()) output_names = list(config.outputs.keys()) if no_dynamic_axes: dynamix_axes = None else: dynamix_axes = dict(chain(inputs.items(), config.outputs.items())) onnx_export(model, (dummy_inputs,), f=output.as_posix(), input_names=input_names, output_names=output_names, dynamic_axes=dynamix_axes, do_constant_folding=do_constant_folding, opset_version=opset) onnx_model = onnx.load(str(output), load_external_data=False) model_uses_external_data = check_model_uses_external_data(onnx_model) if model_uses_external_data or FORCE_ONNX_EXTERNAL_DATA: tensors_paths = _get_onnx_external_data_tensors(onnx_model) constant_paths = _get_onnx_external_constants(onnx_model) logger.info('Saving external data to one file...') del model del 
onnx_model gc.collect() if device.type == 'cuda' and torch.cuda.is_available(): torch.cuda.empty_cache() onnx_model = onnx.load(str(output), load_external_data=True) onnx.save(onnx_model, str(output), save_as_external_data=True, all_tensors_to_one_file=True, location=output.name + '_data', size_threshold=1024 if not FORCE_ONNX_EXTERNAL_DATA else 100, convert_attribute=True) for tensor in tensors_paths: os.remove(output.parent / tensor) for tensor in constant_paths: if os.path.isfile(output.parent / tensor): os.remove(output.parent / tensor) return (input_names, output_names) @require_numpy_strictly_lower('1.24.0', 'The Tensorflow ONNX export only supports numpy<1.24.0.') def export_tensorflow(model: 'TFPreTrainedModel', config: OnnxConfig, opset: int, output: Path) -> Tuple[List[str], List[str]]: import sys import onnx import tensorflow as tf import tf2onnx sys_path_backup = sys.path sys.path.pop(0) sys.path = sys_path_backup logger.info(f'Using framework TensorFlow: {tf.__version__}') model.config.return_dict = True if config.values_override is not None: logger.info(f'Overriding {len(config.values_override)} configuration item(s)') for (override_config_key, override_config_value) in config.values_override.items(): logger.info(f'\t- {override_config_key} -> {override_config_value}') setattr(model.config, override_config_key, override_config_value) dummy_inputs = config.generate_dummy_inputs(framework='tf') check_dummy_inputs_are_allowed(model, dummy_inputs) inputs = config.ordered_inputs(model) input_names = list(inputs.keys()) output_names = list(config.outputs.keys()) input_signature = [] for (key, tensor) in dummy_inputs.items(): shape = [tensor.shape[i] for i in range(tensor.ndim)] for (idx, _) in config.inputs[key].items(): shape[idx] = None input_signature.append(tf.TensorSpec(shape, dtype=tensor.dtype, name=key)) with config.patch_model_for_export(model): (onnx_model, _) = tf2onnx.convert.from_keras(model, input_signature, opset=opset) onnx.save(onnx_model, output.as_posix(), convert_attribute=True) return (input_names, output_names) def export_models(models_and_onnx_configs: Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], 'OnnxConfig']], output_dir: Path, opset: Optional[int]=None, output_names: Optional[List[str]]=None, device: str='cpu', input_shapes: Optional[Dict]=None, disable_dynamic_axes_fix: Optional[bool]=False, dtype: Optional[str]=None, no_dynamic_axes: bool=False, do_constant_folding: bool=True, model_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[List[List[str]], List[List[str]]]: outputs = [] if output_names is not None and len(output_names) != len(models_and_onnx_configs): raise ValueError(f'Provided custom names {output_names} for the export of {len(models_and_onnx_configs)} models. 
Please provide the same number of names as models to export.') for (i, model_name) in enumerate(models_and_onnx_configs.keys()): (submodel, sub_onnx_config) = models_and_onnx_configs[model_name] output_name = output_names[i] if output_names is not None else Path(model_name + '.onnx') output_path = output_dir / output_name output_path.parent.mkdir(parents=True, exist_ok=True) logger.info(f'\n***** Exporting submodel {i + 1}/{len(models_and_onnx_configs)}: {submodel.__class__.__name__} *****') outputs.append(export(model=submodel, config=sub_onnx_config, output=output_path, opset=opset, device=device, input_shapes=input_shapes, disable_dynamic_axes_fix=disable_dynamic_axes_fix, dtype=dtype, no_dynamic_axes=no_dynamic_axes, do_constant_folding=do_constant_folding, model_kwargs=model_kwargs)) outputs = list(map(list, zip(*outputs))) return outputs def export(model: Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], config: OnnxConfig, output: Path, opset: Optional[int]=None, device: str='cpu', input_shapes: Optional[Dict]=None, disable_dynamic_axes_fix: Optional[bool]=False, dtype: Optional[str]=None, no_dynamic_axes: bool=False, do_constant_folding: bool=True, model_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[List[str], List[str]]: if not (is_torch_available() or is_tf_available()): raise ImportError('Cannot convert because neither PyTorch nor TensorFlow are installed. Please install torch or tensorflow first.') output.parent.mkdir(parents=True, exist_ok=True) export_output = None if opset is None: opset = config.DEFAULT_ONNX_OPSET if 'diffusers' in str(model.__class__) and (not is_diffusers_available()): raise ImportError('The pip package `diffusers` is required to export diffusion models to ONNX.') if not config.is_transformers_support_available: import transformers raise MinimumVersionError(f'The current version of Transformers does not allow for the export of the model. Minimum required is {config.MIN_TRANSFORMERS_VERSION}, got: {transformers.__version__}') if is_torch_available() and isinstance(model, nn.Module): from ...utils import torch_version if not is_torch_onnx_support_available(): raise MinimumVersionError(f'Unsupported PyTorch version, minimum required is {TORCH_MINIMUM_VERSION}, got: {torch_version}') if not config.is_torch_support_available: raise MinimumVersionError(f'Unsupported PyTorch version for this model. 
Minimum required is {config.MIN_TORCH_VERSION}, got: {torch.__version__}') export_output = export_pytorch(model, config, opset, output, device=device, input_shapes=input_shapes, no_dynamic_axes=no_dynamic_axes, do_constant_folding=do_constant_folding, model_kwargs=model_kwargs) elif is_tf_available() and issubclass(type(model), TFPreTrainedModel): if model_kwargs is not None: raise NotImplementedError('The argument `model_kwargs` is used only for PyTorch ONNX export, and unavailable for the Tensorflow export.') if device == 'cuda': raise RuntimeError('`tf2onnx` does not support export on CUDA device.') if input_shapes is not None: logger.info('`input_shapes` argument is not supported by the Tensorflow ONNX export and will be ignored.') export_output = export_tensorflow(model, config, opset, output) else: raise RuntimeError('You either provided a PyTorch model with only TensorFlow installed, or a TensorFlow model with only PyTorch installed.') if not disable_dynamic_axes_fix: config.fix_dynamic_axes(output, device=device, input_shapes=input_shapes, dtype=dtype) return export_output def onnx_export_from_model(model: Union['PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline'], output: Union[str, Path], opset: Optional[int]=None, optimize: Optional[str]=None, monolith: bool=False, no_post_process: bool=False, atol: Optional[float]=None, do_validation: bool=True, model_kwargs: Optional[Dict[str, Any]]=None, custom_onnx_configs: Optional[Dict[str, 'OnnxConfig']]=None, fn_get_submodels: Optional[Callable]=None, _variant: str='default', legacy: bool=False, preprocessors: List=None, device: str='cpu', no_dynamic_axes: bool=False, task: Optional[str]=None, use_subprocess: bool=False, do_constant_folding: bool=True, **kwargs_shapes): TasksManager.standardize_model_attributes(model) if hasattr(model.config, 'export_model_type'): model_type = model.config.export_model_type.replace('_', '-') else: model_type = model.config.model_type.replace('_', '-') library_name = TasksManager.infer_library_from_model(model) custom_architecture = library_name == 'transformers' and model_type not in TasksManager._SUPPORTED_MODEL_TYPE if task is not None: task = TasksManager.map_from_synonym(task) else: try: task = TasksManager._infer_task_from_model_or_model_class(model=model) except (ValueError, KeyError) as e: raise RuntimeError(f"The model task could not be automatically inferred in `onnx_export_from_model`. Please provide the argument `task` with the relevant task from {', '.join(TasksManager.get_all_tasks())}. Detailed error: {e}") if not custom_architecture and library_name != 'diffusers' and (task + '-with-past' in TasksManager.get_supported_tasks_for_model_type(model_type, 'onnx', library_name=library_name)) and (not monolith): task = task + '-with-past' logger.info(f'Automatic task detection to: {task}.') framework = 'pt' if is_torch_available() and isinstance(model, torch.nn.Module) else 'tf' dtype = get_parameter_dtype(model) if framework == 'pt' else model.dtype if 'bfloat16' in str(dtype): float_dtype = 'bf16' elif 'float16' in str(dtype): float_dtype = 'fp16' else: float_dtype = 'fp32' if custom_architecture and custom_onnx_configs is None: raise ValueError(f'Trying to export a {model_type} model, that is a custom or unsupported architecture, but no custom onnx configuration was passed as `custom_onnx_configs`. 
Please refer to https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#custom-export-of-transformers-models for an example on how to export custom models. Please open an issue at https://github.com/huggingface/optimum/issues if you would like the model type {model_type} to be supported natively in the ONNX export.') if task.startswith('text-generation') and model.config.is_encoder_decoder: raise ValueError(f"model.config.is_encoder_decoder is True and task is `{task}`, which are incompatible. If the task was auto-inferred, please file a bug report at https://github.com/huggingface/optimum; if --task was explicitly passed, make sure you selected the right task for the model, referring to `optimum.exporters.tasks.TaskManager`'s `_TRANSFORMERS_TASKS_TO_MODEL_LOADERS`.") if legacy and model_type in MODEL_TYPES_REQUIRING_POSITION_IDS and task.startswith('text-generation'): logger.warning(f'legacy=True was specified in the ONNX export, although the model {model_type} requires position_ids for batched inference. Passing `legacy=True` is strongly discouraged, and this option will be removed in a future release. Reference: https://github.com/huggingface/optimum/pull/1381') if library_name != 'diffusers' and model_type in TasksManager._UNSUPPORTED_CLI_MODEL_TYPE: raise ValueError(f'{model_type} is not supported yet. Only {list(TasksManager._SUPPORTED_CLI_MODEL_TYPE.keys())} are supported. If you want to support {model_type} please propose a PR or open up an issue.') output = Path(output) if not output.exists(): output.mkdir(parents=True) input_shapes = {} for input_name in DEFAULT_DUMMY_SHAPES.keys(): input_shapes[input_name] = kwargs_shapes[input_name] if input_name in kwargs_shapes else DEFAULT_DUMMY_SHAPES[input_name] if model_type in MODEL_TO_PATCH_FOR_PAST and input_name == 'sequence_length' and (kwargs_shapes.get(input_name) == 1): raise ValueError(f'Exporting a {model_type} model with a sequence length of 1 is not supported and can yield unexpected results.') (onnx_config, models_and_onnx_configs) = _get_submodels_and_onnx_configs(model=model, task=task, monolith=monolith, custom_onnx_configs=custom_onnx_configs if custom_onnx_configs is not None else {}, custom_architecture=custom_architecture, float_dtype=float_dtype, fn_get_submodels=fn_get_submodels, preprocessors=preprocessors, _variant=_variant, legacy=legacy, library_name=library_name, model_kwargs=model_kwargs) if library_name != 'diffusers': if opset is None: opset = onnx_config.DEFAULT_ONNX_OPSET elif opset < onnx_config.DEFAULT_ONNX_OPSET: logger.warning(f'Opset {opset} is lower than the recommended minimum opset ({onnx_config.DEFAULT_ONNX_OPSET}) to export {model_type}. 
The ONNX export may fail or the exported model may be suboptimal.') if atol is None: atol = onnx_config.ATOL_FOR_VALIDATION if isinstance(atol, dict): atol = atol[task.replace('-with-past', '')] model.config.save_pretrained(output) generation_config = getattr(model, 'generation_config', None) if generation_config is not None: try: generation_config.save_pretrained(output) except Exception as exception: logger.warning(f'The generation config is invalid and will not be saved : {exception}') model_name_or_path = model.config._name_or_path maybe_save_preprocessors(model_name_or_path, output) onnx_files_subpaths = [key + '.onnx' for key in models_and_onnx_configs.keys()] else: for model_name in models_and_onnx_configs: subcomponent = models_and_onnx_configs[model_name][0] if hasattr(subcomponent, 'save_config'): subcomponent.save_config(output / model_name) elif hasattr(subcomponent, 'config') and hasattr(subcomponent.config, 'save_pretrained'): subcomponent.config.save_pretrained(output / model_name) onnx_files_subpaths = [os.path.join(name_dir, ONNX_WEIGHTS_NAME) for name_dir in models_and_onnx_configs] model.scheduler.save_pretrained(output.joinpath('scheduler')) feature_extractor = getattr(model, 'feature_extractor', None) if feature_extractor is not None: feature_extractor.save_pretrained(output.joinpath('feature_extractor')) tokenizer = getattr(model, 'tokenizer', None) if tokenizer is not None: tokenizer.save_pretrained(output.joinpath('tokenizer')) tokenizer_2 = getattr(model, 'tokenizer_2', None) if tokenizer_2 is not None: tokenizer_2.save_pretrained(output.joinpath('tokenizer_2')) model.save_config(output) if float_dtype == 'bf16': logger.warning(f'Exporting the model {model.__class__.__name__} in bfloat16 float dtype. After the export, ONNX Runtime InferenceSession with CPU/CUDA execution provider likely does not implement all operators for the bfloat16 data type, and the loading is likely to fail.') (_, onnx_outputs) = export_models(models_and_onnx_configs=models_and_onnx_configs, opset=opset, output_dir=output, output_names=onnx_files_subpaths, input_shapes=input_shapes, device=device, dtype=float_dtype, no_dynamic_axes=no_dynamic_axes, do_constant_folding=do_constant_folding, model_kwargs=model_kwargs) if optimize is not None: from ...onnxruntime import AutoOptimizationConfig, ORTOptimizer optimizer = ORTOptimizer.from_pretrained(output, file_names=onnx_files_subpaths) optimization_config = AutoOptimizationConfig.with_optimization_level(optimization_level=optimize) optimization_config.disable_shape_inference = True optimizer.optimize(save_dir=output, optimization_config=optimization_config, file_suffix='') if not no_post_process and library_name != 'diffusers': try: logger.info('Post-processing the exported models...') (models_and_onnx_configs, onnx_files_subpaths) = onnx_config.post_process_exported_models(output, models_and_onnx_configs, onnx_files_subpaths) except Exception as e: raise Exception(f'The post-processing of the ONNX export failed. The export can still be performed by passing the option --no-post-process. 
Detailed error: {e}') if library_name == 'diffusers': use_subprocess = False elif model_type in UNPICKABLE_ARCHS: use_subprocess = False if device == 'cpu': use_subprocess = False if do_validation is True: try: validate_models_outputs(models_and_onnx_configs=models_and_onnx_configs, onnx_named_outputs=onnx_outputs, atol=atol, output_dir=output, onnx_files_subpaths=onnx_files_subpaths, input_shapes=input_shapes, device=device, use_subprocess=use_subprocess, model_kwargs=model_kwargs) logger.info(f'The ONNX export succeeded and the exported model was saved at: {output.as_posix()}') except ShapeError as e: raise e except AtolError as e: logger.warning(f'The ONNX export succeeded with the warning: {e}.\n The exported model was saved at: {output.as_posix()}') except OutputMatchError as e: logger.warning(f'The ONNX export succeeded with the warning: {e}.\n The exported model was saved at: {output.as_posix()}') except Exception as e: raise Exception(f'An error occured during validation, but the model was saved nonetheless at {output.as_posix()}. Detailed error: {e}.') # File: optimum-main/optimum/exporters/onnx/model_configs.py """""" import random from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union from packaging import version from transformers.utils import is_tf_available from ...onnx import merge_decoders from ...utils import DEFAULT_DUMMY_SHAPES, BloomDummyPastKeyValuesGenerator, DummyAudioInputGenerator, DummyCodegenDecoderTextInputGenerator, DummyDecoderTextInputGenerator, DummyEncodecInputGenerator, DummyInputGenerator, DummyIntGenerator, DummyPastKeyValuesGenerator, DummyPix2StructInputGenerator, DummyPointsGenerator, DummySeq2SeqDecoderTextInputGenerator, DummySeq2SeqPastKeyValuesGenerator, DummySpeechT5InputGenerator, DummyTextInputGenerator, DummyTimestepInputGenerator, DummyVisionEmbeddingsGenerator, DummyVisionEncoderDecoderPastKeyValuesGenerator, DummyVisionInputGenerator, DummyXPathSeqInputGenerator, FalconDummyPastKeyValuesGenerator, GemmaDummyPastKeyValuesGenerator, GPTBigCodeDummyPastKeyValuesGenerator, MistralDummyPastKeyValuesGenerator, NormalizedConfig, NormalizedEncoderDecoderConfig, NormalizedSeq2SeqConfig, NormalizedTextAndVisionConfig, NormalizedTextConfig, NormalizedTextConfigWithGQA, NormalizedVisionConfig, check_if_transformers_greater, is_diffusers_available, logging from ...utils.normalized_config import NormalizedConfigManager from .base import ConfigBehavior, OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from .config import AudioOnnxConfig, AudioToTextOnnxConfig, EncoderDecoderBaseOnnxConfig, TextAndVisionOnnxConfig, TextDecoderOnnxConfig, TextDecoderWithPositionIdsOnnxConfig, TextEncoderOnnxConfig, TextSeq2SeqOnnxConfig, VisionOnnxConfig from .constants import ONNX_DECODER_MERGED_NAME, ONNX_DECODER_NAME, ONNX_DECODER_WITH_PAST_NAME from .model_patcher import CLIPModelPatcher, FalconModelPatcher, MistralModelPatcher, MusicgenModelPatcher, SAMModelPatcher, SentenceTransformersCLIPPatcher, SentenceTransformersTransformerPatcher, SpeechT5ModelPatcher, VisionEncoderDecoderPatcher, WavLMModelPatcher if TYPE_CHECKING: from transformers import PretrainedConfig from transformers.modeling_utils import PreTrainedModel from .model_patcher import ModelPatcher if is_tf_available(): from transformers.modeling_tf_utils import TFPreTrainedModel if is_diffusers_available(): from diffusers import ModelMixin logger = logging.get_logger(__name__) class BertOnnxConfig(TextEncoderOnnxConfig): NORMALIZED_CONFIG_CLASS = 
NormalizedTextConfig ATOL_FOR_VALIDATION = 0.0001 DEFAULT_ONNX_OPSET = 14 @property def inputs(self) -> Dict[str, Dict[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch_size', 1: 'num_choices', 2: 'sequence_length'} else: dynamic_axis = {0: 'batch_size', 1: 'sequence_length'} return {'input_ids': dynamic_axis, 'attention_mask': dynamic_axis, 'token_type_ids': dynamic_axis} class AlbertOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class ConvBertOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class ElectraOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class RoFormerOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class SqueezeBertOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class MobileBertOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class NystromformerOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class XLMOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class SplinterOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class DistilBertOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 @property def inputs(self) -> Dict[str, Dict[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch_size', 1: 'num_choices', 2: 'sequence_length'} else: dynamic_axis = {0: 'batch_size', 1: 'sequence_length'} return {'input_ids': dynamic_axis, 'attention_mask': dynamic_axis} class MPNetOnnxConfig(DistilBertOnnxConfig): DEFAULT_ONNX_OPSET = 12 class RobertaOnnxConfig(DistilBertOnnxConfig): pass class CamembertOnnxConfig(DistilBertOnnxConfig): pass class FlaubertOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class IBertOnnxConfig(DistilBertOnnxConfig): pass class XLMRobertaOnnxConfig(DistilBertOnnxConfig): pass class DebertaOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 12 @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = super().inputs if self._config.type_vocab_size == 0: common_inputs.pop('token_type_ids') return common_inputs class MarkupLMOnnxConfig(BertOnnxConfig): DEFAULT_ONNX_OPSET = 11 DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, DummyXPathSeqInputGenerator) @property def inputs(self) -> Dict[str, Dict[int, str]]: dynamic_axis = {0: 'batch_size', 1: 'sequence_length'} xpath_dynamic_axis = {0: 'batch_size', 1: 'sequence_length', 2: 'max_depth'} return {'input_ids': dynamic_axis, 'attention_mask': dynamic_axis, 'token_type_ids': dynamic_axis, 'xpath_subs_seq': xpath_dynamic_axis, 'xpath_tags_seq': xpath_dynamic_axis} class DebertaV2OnnxConfig(DebertaOnnxConfig): pass class EsmOnnxConfig(TextEncoderOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedTextConfig ATOL_FOR_VALIDATION = 0.0001 DEFAULT_ONNX_OPSET = 12 @property def inputs(self) -> Dict[str, Dict[int, str]]: dynamic_axis = {0: 'batch_size', 1: 'sequence_length'} return {'input_ids': dynamic_axis, 'attention_mask': dynamic_axis} class GPT2OnnxConfig(TextDecoderWithPositionIdsOnnxConfig): DEFAULT_ONNX_OPSET = 14 NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_layers='n_layer', num_attention_heads='n_head') class GPTJOnnxConfig(GPT2OnnxConfig): pass class CodeGenOnnxConfig(GPT2OnnxConfig): pass class ImageGPTOnnxConfig(GPT2OnnxConfig): pass class GPTNeoOnnxConfig(TextDecoderWithPositionIdsOnnxConfig): DEFAULT_ONNX_OPSET = 13 NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_attention_heads='num_heads') class GPTNeoXOnnxConfig(TextDecoderWithPositionIdsOnnxConfig): DEFAULT_ONNX_OPSET = 14 NORMALIZED_CONFIG_CLASS = NormalizedTextConfig class OPTOnnxConfig(TextDecoderOnnxConfig): DEFAULT_ONNX_OPSET = 13 
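# --- Illustrative sketch (not part of the library) ---------------------------------------
# A minimal custom ONNX config written in the same style as the classes in this module,
# reusing TextEncoderOnnxConfig, NormalizedTextConfig and Dict imported at the top of the
# file. The class name, opset and tolerance are arbitrary for the example; real configs are
# wired up through TasksManager rather than defined ad hoc like this.
class _ExampleEncoderOnnxConfig(TextEncoderOnnxConfig):
    NORMALIZED_CONFIG_CLASS = NormalizedTextConfig
    DEFAULT_ONNX_OPSET = 14
    ATOL_FOR_VALIDATION = 0.0001

    @property
    def inputs(self) -> Dict[str, Dict[int, str]]:
        # Axes 0 and 1 of both tensors are exported as dynamic dimensions.
        dynamic_axis = {0: 'batch_size', 1: 'sequence_length'}
        return {'input_ids': dynamic_axis, 'attention_mask': dynamic_axis}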
NORMALIZED_CONFIG_CLASS = NormalizedTextConfig class LlamaOnnxConfig(TextDecoderWithPositionIdsOnnxConfig): DEFAULT_ONNX_OPSET = 14 DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, MistralDummyPastKeyValuesGenerator) DUMMY_PKV_GENERATOR_CLASS = MistralDummyPastKeyValuesGenerator NORMALIZED_CONFIG_CLASS = NormalizedTextConfig class Qwen2OnnxConfig(LlamaOnnxConfig): MIN_TRANSFORMERS_VERSION = version.parse('4.37.0') class GemmaOnnxConfig(LlamaOnnxConfig): DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, GemmaDummyPastKeyValuesGenerator) DUMMY_PKV_GENERATOR_CLASS = GemmaDummyPastKeyValuesGenerator pass class PhiOnnxConfig(TextDecoderWithPositionIdsOnnxConfig): DEFAULT_ONNX_OPSET = 14 NORMALIZED_CONFIG_CLASS = NormalizedTextConfig MIN_TRANSFORMERS_VERSION = version.parse('4.36.0') class Phi3OnnxConfig(PhiOnnxConfig): DUMMY_INPUT_GENERATOR_CLASSES = (MistralDummyPastKeyValuesGenerator,) + TextDecoderOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES DUMMY_PKV_GENERATOR_CLASS = MistralDummyPastKeyValuesGenerator NORMALIZED_CONFIG_CLASS = NormalizedTextConfigWithGQA MIN_TRANSFORMERS_VERSION = version.parse('4.41.0') class MistralOnnxConfig(TextDecoderWithPositionIdsOnnxConfig): MIN_TRANSFORMERS_VERSION = version.parse('4.34.99') DEFAULT_ONNX_OPSET = 14 DUMMY_INPUT_GENERATOR_CLASSES = (MistralDummyPastKeyValuesGenerator,) + TextDecoderOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES DUMMY_PKV_GENERATOR_CLASS = MistralDummyPastKeyValuesGenerator NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_key_value_heads='num_key_value_heads', allow_new=True) def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return MistralModelPatcher(self, model, model_kwargs=model_kwargs) class MPTOnnxConfig(TextDecoderOnnxConfig): DEFAULT_ONNX_OPSET = 13 NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_attention_heads='n_heads', hidden_size='d_model', num_layers='n_layers') class BloomOnnxConfig(TextDecoderOnnxConfig): DUMMY_INPUT_GENERATOR_CLASSES = (BloomDummyPastKeyValuesGenerator,) + TextDecoderOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES DUMMY_PKV_GENERATOR_CLASS = BloomDummyPastKeyValuesGenerator NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(num_layers='n_layer', num_attention_heads='n_head') DEFAULT_ONNX_OPSET = 14 def add_past_key_values(self, inputs_or_outputs: Dict[str, Dict[int, str]], direction: str): if check_if_transformers_greater('4.44'): super().add_past_key_values(inputs_or_outputs, direction) else: if direction not in ['inputs', 'outputs']: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') if direction == 'inputs': decoder_sequence_name = 'past_sequence_length' name = 'past_key_values' else: decoder_sequence_name = 'past_sequence_length + 1' name = 'present' for i in range(self._normalized_config.num_layers): inputs_or_outputs[f'{name}.{i}.key'] = {0: 'batch_size x num_heads', 2: decoder_sequence_name} inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch_size x num_heads', 1: decoder_sequence_name} class GPTBigCodeOnnxConfig(TextDecoderWithPositionIdsOnnxConfig): DUMMY_INPUT_GENERATOR_CLASSES = (GPTBigCodeDummyPastKeyValuesGenerator,) + TextDecoderOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES DEFAULT_ONNX_OPSET = 14 DUMMY_PKV_GENERATOR_CLASS = GPTBigCodeDummyPastKeyValuesGenerator NORMALIZED_CONFIG_CLASS = NormalizedConfigManager.get_normalized_config_class('gpt_bigcode') def add_past_key_values(self, inputs_or_outputs: Dict[str, 
Dict[int, str]], direction: str): if direction not in ['inputs', 'outputs']: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') if direction == 'inputs': decoder_sequence_name = 'past_sequence_length' name = 'past_key_values' else: decoder_sequence_name = 'past_sequence_length + 1' name = 'present' for i in range(self._normalized_config.num_layers): inputs_or_outputs[f'{name}.{i}.key_value'] = {0: 'batch_size', 1: decoder_sequence_name} def flatten_past_key_values(self, flattened_output, name, idx, t): flattened_output[f'{name}.{idx}.key_value'] = t class FalconOnnxConfig(TextDecoderOnnxConfig): MIN_TRANSFORMERS_VERSION = version.parse('4.35.99') DUMMY_INPUT_GENERATOR_CLASSES = (FalconDummyPastKeyValuesGenerator,) + TextDecoderOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES DEFAULT_ONNX_OPSET = 14 NORMALIZED_CONFIG_CLASS = NormalizedTextConfig DUMMY_PKV_GENERATOR_CLASS = FalconDummyPastKeyValuesGenerator def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', use_past: bool=False, use_past_in_inputs: bool=False, preprocessors: Optional[List[Any]]=None, legacy: bool=False): super().__init__(config=config, task=task, int_dtype=int_dtype, float_dtype=float_dtype, use_past=use_past, use_past_in_inputs=use_past_in_inputs, preprocessors=preprocessors, legacy=legacy) self._normalized_config.num_kv_heads = self._normalized_config.num_kv_heads if self._normalized_config.new_decoder_architecture or not self._normalized_config.multi_query else 1 @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = super().inputs if not self.legacy and (not self._config.alibi) and (self.task in ['text-generation', 'feature-extraction']): common_inputs['position_ids'] = {0: 'batch_size', 1: 'sequence_length'} return common_inputs def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return FalconModelPatcher(self, model, model_kwargs=model_kwargs) class T5DummySeq2SeqPastKeyValuesGenerator(DummySeq2SeqPastKeyValuesGenerator): def generate(self, input_name: str, framework: str='pt', int_dtype: str='int64', float_dtype: str='fp32'): encoder_shape = (self.batch_size, self.normalized_config.encoder_num_attention_heads, self.encoder_sequence_length, self.normalized_config.key_value_dim) decoder_shape = (self.batch_size, self.normalized_config.decoder_num_attention_heads, self.sequence_length, self.normalized_config.key_value_dim) return [(self.random_float_tensor(decoder_shape, framework=framework, dtype=float_dtype), self.random_float_tensor(decoder_shape, framework=framework, dtype=float_dtype), self.random_float_tensor(encoder_shape, framework=framework, dtype=float_dtype), self.random_float_tensor(encoder_shape, framework=framework, dtype=float_dtype)) for _ in range(self.normalized_config.decoder_num_layers)] class T5OnnxConfig(TextSeq2SeqOnnxConfig): DEFAULT_ONNX_OPSET = 13 DUMMY_INPUT_GENERATOR_CLASSES = TextSeq2SeqOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES[:-1] + (T5DummySeq2SeqPastKeyValuesGenerator,) DUMMY_PKV_GENERATOR_CLASS = T5DummySeq2SeqPastKeyValuesGenerator NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(hidden_size='d_model', num_attention_heads='num_heads', encoder_num_layers='num_layers', decoder_num_layers='num_decoder_layers', key_value_dim='d_kv', allow_new=True) def generate_dummy_inputs_for_validation(self, reference_model_inputs: Dict[str, Any], onnx_input_names: 
Optional[List[str]]=None) -> Dict[str, Any]: if self._behavior is ConfigBehavior.DECODER: reference_model_inputs['input_ids'] = reference_model_inputs.pop('decoder_input_ids') if onnx_input_names is not None: if 'encoder_outputs' in reference_model_inputs: if 'encoder_hidden_states' in onnx_input_names: reference_model_inputs['encoder_hidden_states'] = reference_model_inputs.pop('encoder_outputs')[0] else: reference_model_inputs.pop('encoder_outputs') else: reference_model_inputs['encoder_hidden_states'] = reference_model_inputs.pop('encoder_outputs')[0] return super().generate_dummy_inputs_for_validation(reference_model_inputs) class MT5OnnxConfig(T5OnnxConfig): ATOL_FOR_VALIDATION = 0.0001 class LongT5OnnxConfig(T5OnnxConfig): DEFAULT_ONNX_OPSET = 14 class BartDummyTextInputGenerator(DummyTextInputGenerator): def __init__(self, task: str, normalized_config: NormalizedSeq2SeqConfig, batch_size: int=DEFAULT_DUMMY_SHAPES['batch_size'], sequence_length: int=DEFAULT_DUMMY_SHAPES['sequence_length'], num_choices: int=DEFAULT_DUMMY_SHAPES['num_choices'], random_batch_size_range: Optional[Tuple[int, int]]=None, random_sequence_length_range: Optional[Tuple[int, int]]=None, random_num_choices_range: Optional[Tuple[int, int]]=None, force_eos_token_id_presence: bool=True, **kwargs): super().__init__(task=task, normalized_config=normalized_config, batch_size=batch_size, sequence_length=sequence_length, num_choices=num_choices, random_batch_size_range=random_batch_size_range, random_sequence_length_range=random_sequence_length_range, random_num_choices_range=random_num_choices_range) self.force_eos_token_id_presence = force_eos_token_id_presence self.eos_token_id = normalized_config.eos_token_id def generate(self, input_name: str, framework: str='pt', int_dtype: str='int64', float_dtype: str='fp32'): int_tensor = super().generate(input_name, framework=framework, int_dtype=int_dtype, float_dtype=float_dtype) if self.force_eos_token_id_presence and 'input_ids' in input_name and (self.task == 'text-classification'): for idx in range(self.batch_size): if self.eos_token_id in int_tensor[idx]: continue random_idx = random.randint(1, self.sequence_length - 1) int_tensor[idx][random_idx] = self.eos_token_id return int_tensor class M2M100OnnxConfig(TextSeq2SeqOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(encoder_num_layers='encoder_layers', decoder_num_layers='decoder_layers', num_layers='decoder_layers', encoder_num_attention_heads='encoder_attention_heads', decoder_num_attention_heads='decoder_attention_heads', eos_token_id='eos_token_id') DUMMY_INPUT_GENERATOR_CLASSES = (BartDummyTextInputGenerator, {'feature-extraction': DummySeq2SeqDecoderTextInputGenerator, 'text-generation': DummyDecoderTextInputGenerator}, {'feature-extraction': DummySeq2SeqPastKeyValuesGenerator, 'text-generation': DummyPastKeyValuesGenerator}) def _create_dummy_input_generator_classes(self, **kwargs) -> List['DummyInputGenerator']: dummy_text_input_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[0](self.task, self._normalized_config, **kwargs) task = 'feature-extraction' if self.task != 'text-generation' else 'text-generation' dummy_decoder_text_input_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[1][task](self.task, self._normalized_config, **kwargs) if self.task != 'text-generation': kwargs['encoder_sequence_length'] = dummy_text_input_generator.sequence_length dummy_seq2seq_past_key_values_generator = self.DUMMY_INPUT_GENERATOR_CLASSES[2][task](self.task, self._normalized_config, **kwargs) 
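# --- Illustrative sketch (not part of the library) ---------------------------------------
# How the dummy input generators used throughout these configs produce tensors: a generator
# is built from a task name and a normalized config, then asked for a named input. The
# checkpoint name and shapes below are placeholders for this sketch.
def _dummy_text_inputs_sketch():
    from transformers import AutoConfig

    from optimum.utils import DummyTextInputGenerator, NormalizedTextConfig

    config = AutoConfig.from_pretrained("bert-base-uncased")
    generator = DummyTextInputGenerator(
        "text-classification",
        NormalizedTextConfig(config),
        batch_size=2,
        sequence_length=16,
    )
    # Each named input is generated independently with the requested framework and dtype.
    input_ids = generator.generate("input_ids", framework="pt", int_dtype="int64")
    attention_mask = generator.generate("attention_mask", framework="pt", int_dtype="int64")
    return input_ids, attention_mask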
dummy_inputs_generators = [dummy_text_input_generator, dummy_decoder_text_input_generator, dummy_seq2seq_past_key_values_generator] return dummy_inputs_generators @property def inputs_for_default_and_seq2seq_lm(self): return super().inputs @property def inputs_for_causal_lm(self): if self.use_past_in_inputs: common_inputs = {'input_ids': {0: 'batch_size', 1: 'sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'past_sequence_length + 1'}} for i in range(self._normalized_config.decoder_num_layers): common_inputs[f'past_key_values.{i}.key'] = {0: 'batch_size', 2: 'past_sequence_length'} common_inputs[f'past_key_values.{i}.value'] = {0: 'batch_size', 2: 'past_sequence_length'} else: common_inputs = {'input_ids': {0: 'batch_size', 1: 'sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'sequence_length'}} return common_inputs @property def inputs_for_other_tasks(self): return {'input_ids': {0: 'batch_size', 1: 'sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'sequence_length'}} @property def inputs(self) -> Dict[str, Dict[int, str]]: inputs_properties = {'feature-extraction': self.inputs_for_default_and_seq2seq_lm, 'text2text-generation': self.inputs_for_default_and_seq2seq_lm, 'text-generation': self.inputs_for_causal_lm, 'other': self.inputs_for_other_tasks} return inputs_properties.get(self.task, inputs_properties['other']) @property def outputs(self) -> Dict[str, Dict[int, str]]: if self.task in ['feature-extraction', 'text2text-generation']: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: for i in range(self._normalized_config.encoder_num_layers if self.task != 'text-generation' else self._normalized_config.decoder_num_layers): common_outputs[f'present.{i}.key'] = {0: 'batch_size', 2: 'past_sequence_length + sequence_length'} common_outputs[f'present.{i}.value'] = {0: 'batch_size', 2: 'past_sequence_length + sequence_length'} return common_outputs def generate_dummy_inputs(self, framework: str='pt', **kwargs): if self.task == 'text-generation': self.PAD_ATTENTION_MASK_TO_PAST = True dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs) self.PAD_ATTENTION_MASK_TO_PAST = False return dummy_inputs def flatten_past_key_values(self, flattened_output, name, idx, t): if self.task in ['feature-extraction', 'text2text-generation']: flattened_output = super().flatten_past_key_values(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self).flatten_past_key_values(flattened_output, name, idx, t) class BartOnnxConfig(M2M100OnnxConfig): DEFAULT_ONNX_OPSET = 14 MIN_TORCH_VERSION = version.parse('2.1.2') pass class MBartOnnxConfig(BartOnnxConfig): pass class BlenderbotOnnxConfig(BartOnnxConfig): pass class BlenderbotSmallOnnxConfig(BartOnnxConfig): pass class PegasusOnnxConfig(BartOnnxConfig): pass class MarianOnnxConfig(BartOnnxConfig): pass class ViTOnnxConfig(VisionOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedVisionConfig MIN_TORCH_VERSION = version.parse('1.11') DEFAULT_ONNX_OPSET = 14 @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'pixel_values': {0: 'batch_size', 1: 'num_channels', 2: 'height', 3: 'width'}} @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = super().outputs if self.task == 'feature-extraction': common_outputs['last_hidden_state'] = {0: 'batch_size'} return common_outputs class CvTOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 13 ATOL_FOR_VALIDATION = 0.01 class 
LevitOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class DeiTOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 14 class BeitOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class ConvNextOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class ConvNextV2OnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class MobileViTOnnxConfig(ViTOnnxConfig): ATOL_FOR_VALIDATION = 0.0001 DEFAULT_ONNX_OPSET = 11 class RegNetOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class ResNetOnnxConfig(ViTOnnxConfig): ATOL_FOR_VALIDATION = 0.001 DEFAULT_ONNX_OPSET = 11 class DetrOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 12 @property def outputs(self) -> Dict[str, Dict[int, str]]: if self.task == 'image-segmentation': return {'logits': {0: 'batch_size', 1: 'num_queries'}, 'pred_masks': {0: 'batch_size', 1: 'num_queries'}} else: return super().outputs class TableTransformerOnnxConfig(DetrOnnxConfig): pass class YolosOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 14 class SwinOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class Swin2srOnnxConfig(SwinOnnxConfig): pass class DptOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class GlpnOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class PoolFormerOnnxConfig(ViTOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedVisionConfig ATOL_FOR_VALIDATION = 0.002 DEFAULT_ONNX_OPSET = 11 class SegformerOnnxConfig(YolosOnnxConfig): pass class MobileNetV1OnnxConfig(ViTOnnxConfig): ATOL_FOR_VALIDATION = 0.0001 DEFAULT_ONNX_OPSET = 11 @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'pixel_values': {0: 'batch_size'}} class MobileNetV2OnnxConfig(MobileNetV1OnnxConfig): pass class DonutSwinOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class TimmDefaultOnnxConfig(ViTOnnxConfig): ATOL_FOR_VALIDATION = 0.001 DEFAULT_ONNX_OPSET = 12 def rename_ambiguous_inputs(self, inputs): model_inputs = {} model_inputs['x'] = inputs['pixel_values'] return model_inputs @property def torch_to_onnx_input_map(self) -> Dict[str, str]: return {'x': 'pixel_values'} class SentenceTransformersTransformerOnnxConfig(TextEncoderOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedTextConfig DEFAULT_ONNX_OPSET = 14 @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'input_ids': {0: 'batch_size', 1: 'sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'sequence_length'}} @property def outputs(self) -> Dict[str, Dict[int, str]]: return {'token_embeddings': {0: 'batch_size', 1: 'sequence_length'}, 'sentence_embedding': {0: 'batch_size'}} def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return SentenceTransformersTransformerPatcher(self, model, model_kwargs=model_kwargs) class CLIPNormalizedConfig(NormalizedTextAndVisionConfig): TEXT_CONFIG = 'text_config' VISION_CONFIG = 'vision_config' class CLIPVisionModelOnnxConfig(VisionOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedVisionConfig @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'pixel_values': {0: 'batch_size', 1: 'num_channels', 2: 'height', 3: 'width'}} @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = super().outputs common_outputs['last_hidden_state'] = {0: 'batch_size'} common_outputs['pooler_output'] = {0: 'batch_size'} return common_outputs def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return CLIPModelPatcher(self, model, 
model_kwargs=model_kwargs) class CLIPOnnxConfig(TextAndVisionOnnxConfig): NORMALIZED_CONFIG_CLASS = CLIPNormalizedConfig @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'input_ids': {0: 'text_batch_size', 1: 'sequence_length'}, 'pixel_values': {0: 'image_batch_size', 1: 'num_channels', 2: 'height', 3: 'width'}, 'attention_mask': {0: 'text_batch_size', 1: 'sequence_length'}} @property def outputs(self) -> Dict[str, Dict[int, str]]: return {'logits_per_image': {0: 'image_batch_size', 1: 'text_batch_size'}, 'logits_per_text': {0: 'text_batch_size', 1: 'image_batch_size'}, 'text_embeds': {0: 'text_batch_size'}, 'image_embeds': {0: 'image_batch_size'}} def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return CLIPModelPatcher(self, model, model_kwargs=model_kwargs) class SentenceTransformersCLIPOnnxConfig(CLIPOnnxConfig): @property def outputs(self) -> Dict[str, Dict[int, str]]: return {'text_embeds': {0: 'text_batch_size'}, 'image_embeds': {0: 'image_batch_size'}} def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return SentenceTransformersCLIPPatcher(self, model, model_kwargs=model_kwargs) class CLIPTextWithProjectionOnnxConfig(TextEncoderOnnxConfig): ATOL_FOR_VALIDATION = 0.001 DEFAULT_ONNX_OPSET = 14 NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(vocab_size='vocab_size', sequence_length='max_position_embeddings', num_layers='num_hidden_layers', allow_new=True) @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'input_ids': {0: 'batch_size', 1: 'sequence_length'}} @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = {'text_embeds': {0: 'batch_size', 1: 'sequence_length'}, 'last_hidden_state': {0: 'batch_size', 1: 'sequence_length'}} if self._normalized_config.output_hidden_states: for i in range(self._normalized_config.num_layers + 1): common_outputs[f'hidden_states.{i}'] = {0: 'batch_size', 1: 'sequence_length'} return common_outputs def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return CLIPModelPatcher(self, model, model_kwargs=model_kwargs) class CLIPTextOnnxConfig(CLIPTextWithProjectionOnnxConfig): @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = {'last_hidden_state': {0: 'batch_size', 1: 'sequence_length'}, 'pooler_output': {0: 'batch_size'}} if self._normalized_config.output_hidden_states: for i in range(self._normalized_config.num_layers + 1): common_outputs[f'hidden_states.{i}'] = {0: 'batch_size', 1: 'sequence_length'} return common_outputs def generate_dummy_inputs(self, framework: str='pt', **kwargs): dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs) if framework == 'pt': import torch dummy_inputs['input_ids'] = dummy_inputs['input_ids'].to(dtype=torch.int32) return dummy_inputs def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return CLIPModelPatcher(self, model, model_kwargs=model_kwargs) class UNetOnnxConfig(VisionOnnxConfig): ATOL_FOR_VALIDATION = 0.001 DEFAULT_ONNX_OPSET = 14 NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(image_size='sample_size', num_channels='in_channels', hidden_size='cross_attention_dim', 
vocab_size='norm_num_groups', allow_new=True) DUMMY_INPUT_GENERATOR_CLASSES = (DummyVisionInputGenerator, DummyTimestepInputGenerator, DummySeq2SeqDecoderTextInputGenerator) @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = {'sample': {0: 'batch_size', 2: 'height', 3: 'width'}, 'timestep': {0: 'steps'}, 'encoder_hidden_states': {0: 'batch_size', 1: 'sequence_length'}} if getattr(self._normalized_config, 'addition_embed_type', None) == 'text_time': common_inputs['text_embeds'] = {0: 'batch_size'} common_inputs['time_ids'] = {0: 'batch_size'} if getattr(self._normalized_config, 'time_cond_proj_dim', None) is not None: common_inputs['timestep_cond'] = {0: 'batch_size'} return common_inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: return {'out_sample': {0: 'batch_size', 2: 'height', 3: 'width'}} @property def torch_to_onnx_output_map(self) -> Dict[str, str]: return {'sample': 'out_sample'} def generate_dummy_inputs(self, framework: str='pt', **kwargs): dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs) dummy_inputs['encoder_hidden_states'] = dummy_inputs['encoder_hidden_states'][0] if getattr(self._normalized_config, 'addition_embed_type', None) == 'text_time': dummy_inputs['added_cond_kwargs'] = {'text_embeds': dummy_inputs.pop('text_embeds'), 'time_ids': dummy_inputs.pop('time_ids')} return dummy_inputs def ordered_inputs(self, model) -> Dict[str, Dict[int, str]]: inputs = super().ordered_inputs(model=model) if getattr(self._normalized_config, 'addition_embed_type', None) == 'text_time': inputs['text_embeds'] = self.inputs['text_embeds'] inputs['time_ids'] = self.inputs['time_ids'] return inputs class VaeEncoderOnnxConfig(VisionOnnxConfig): ATOL_FOR_VALIDATION = 0.01 DEFAULT_ONNX_OPSET = 14 NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(num_channels='in_channels', image_size='sample_size', allow_new=True) @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'sample': {0: 'batch_size', 2: 'height', 3: 'width'}} @property def outputs(self) -> Dict[str, Dict[int, str]]: return {'latent_sample': {0: 'batch_size', 2: 'height_latent', 3: 'width_latent'}} class VaeDecoderOnnxConfig(VisionOnnxConfig): ATOL_FOR_VALIDATION = 0.001 DEFAULT_ONNX_OPSET = 14 NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(num_channels='latent_channels', allow_new=True) @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'latent_sample': {0: 'batch_size', 2: 'height_latent', 3: 'width_latent'}} @property def outputs(self) -> Dict[str, Dict[int, str]]: return {'sample': {0: 'batch_size', 2: 'height', 3: 'width'}} class GroupViTOnnxConfig(CLIPOnnxConfig): pass class OwlViTOnnxConfig(CLIPOnnxConfig): ATOL_FOR_VALIDATION = 0.0001 MIN_TORCH_VERSION = version.parse('2.1') DEFAULT_ONNX_OPSET = 12 def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', preprocessors: Optional[List[Any]]=None, legacy: bool=False): super().__init__(config=config, task=task, int_dtype=int_dtype, float_dtype=float_dtype, preprocessors=preprocessors, legacy=legacy) if task == 'zero-shot-object-detection': logger.warning('The batch size of this model will not be dynamic because non-maximum suppression is performed. 
Make sure to export the model with the same batch size as the one you will use at inference with `--batch_size N`.') @property def outputs(self) -> Dict[str, Dict[int, str]]: outputs = {} if self.task == 'feature-extraction': outputs['logits_per_image'] = {0: 'image_batch_size', 1: 'text_batch_size'} outputs['logits_per_text'] = {0: 'text_batch_size', 1: 'image_batch_size'} elif self.task == 'zero-shot-object-detection': outputs['logits'] = {0: 'image_batch_size', 2: 'num_queries'} outputs['pred_boxes'] = {0: 'image_batch_size', 1: 'num_boxes'} outputs['text_embeds'] = {0: 'text_batch_size', 1: 'max_text_queries'} outputs['image_embeds'] = {0: 'image_batch_size'} return outputs class OwlV2OnnxConfig(OwlViTOnnxConfig): MIN_TRANSFORMERS_VERSION = version.parse('4.35.0') class LayoutLMOnnxConfig(TextAndVisionOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(allow_new=True, MAX_2D_POSITION_EMBEDDINGS='max_2d_position_embeddings') @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'input_ids': {0: 'batch_size', 1: 'sequence_length'}, 'bbox': {0: 'batch_size', 1: 'sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'sequence_length'}, 'token_type_ids': {0: 'batch_size', 1: 'sequence_length'}} class LayoutLMv3OnnxConfig(TextAndVisionOnnxConfig): MIN_TORCH_VERSION = version.parse('1.12') NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(allow_new=True, MAX_2D_POSITION_EMBEDDINGS='max_2d_position_embeddings', image_size='input_size') DEFAULT_ONNX_OPSET = 12 @property def inputs(self) -> Dict[str, Dict[int, str]]: if self.task in ['text-classification', 'question-answering']: pixel_values_dynamic_axes = {0: 'batch_size', 1: 'num_channels', 2: 'height', 3: 'width'} else: pixel_values_dynamic_axes = {0: 'batch_size', 1: 'num_channels'} return {'input_ids': {0: 'batch_size', 1: 'sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'sequence_length'}, 'bbox': {0: 'batch_size', 1: 'sequence_length'}, 'pixel_values': pixel_values_dynamic_axes} class LiltOnnxConfig(TextAndVisionOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedTextConfig.with_args(allow_new=True, MAX_2D_POSITION_EMBEDDINGS='max_2d_position_embeddings') @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'input_ids': {0: 'batch_size', 1: 'sequence_length'}, 'bbox': {0: 'batch_size', 1: 'sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'sequence_length'}} class Data2VecTextOnnxConfig(DistilBertOnnxConfig): pass class Data2VecVisionOnnxConfig(ViTOnnxConfig): DEFAULT_ONNX_OPSET = 11 class Data2VecAudioOnnxConfig(AudioOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedConfig ATOL_FOR_VALIDATION = 0.0001 DEFAULT_ONNX_OPSET = 14 class PerceiverDummyInputGenerator(DummyVisionInputGenerator): def __init__(self, task: str, normalized_config: NormalizedVisionConfig, batch_size: int=DEFAULT_DUMMY_SHAPES['batch_size'], num_channels: int=DEFAULT_DUMMY_SHAPES['num_channels'], width: int=DEFAULT_DUMMY_SHAPES['width'], height: int=DEFAULT_DUMMY_SHAPES['height'], **kwargs): super().__init__(task=task, normalized_config=normalized_config, batch_size=batch_size, num_channels=num_channels, width=width, height=height, **kwargs) from transformers.onnx.utils import get_preprocessor preprocessor = get_preprocessor(normalized_config._name_or_path) if preprocessor is not None and hasattr(preprocessor, 'size'): self.height = preprocessor.size.get('height', self.height) self.width = preprocessor.size.get('width', self.width) def generate(self, input_name: str, framework: str='pt', int_dtype: 
str='int64', float_dtype: str='fp32'): input_ = super().generate(input_name=input_name, framework=framework, int_dtype=int_dtype, float_dtype=float_dtype) return input_ class PerceiverOnnxConfig(TextAndVisionOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedTextConfig DUMMY_INPUT_GENERATOR_CLASSES = (PerceiverDummyInputGenerator,) + TextAndVisionOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', preprocessors: Optional[List[Any]]=None, legacy: bool=False): super().__init__(config=config, task=task, int_dtype=int_dtype, float_dtype=float_dtype, preprocessors=preprocessors, legacy=legacy) self.is_generating_dummy_inputs = False @property def inputs_name(self): if self.is_generating_dummy_inputs: if self.task in ['fill-mask', 'text-classification']: return 'input_ids' else: return 'pixel_values' else: return 'inputs' @property def inputs(self) -> Dict[str, Dict[int, str]]: if self.inputs_name in ['input_ids', 'inputs']: dynamic_axis = {0: 'batch_size', 1: 'sequence_length'} return {'input_ids': dynamic_axis, 'attention_mask': dynamic_axis} else: dynamic_axis = {0: 'batch_size', 1: 'sequence_length', 2: 'width', 3: 'height'} return {'pixel_values': dynamic_axis} def generate_dummy_inputs(self, framework: str='pt', **kwargs): self.is_generating_dummy_inputs = True dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs) dummy_inputs[self.inputs_name] = dummy_inputs.pop(self.inputs_name) return dummy_inputs class HubertOnnxConfig(AudioOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedConfig DEFAULT_ONNX_OPSET = 14 class Wav2Vec2OnnxConfig(HubertOnnxConfig): DEFAULT_ONNX_OPSET = 14 class Wav2Vec2ConformerOnnxConfig(HubertOnnxConfig): DEFAULT_ONNX_OPSET = 11 class SEWOnnxConfig(HubertOnnxConfig): DEFAULT_ONNX_OPSET = 14 class SEWDOnnxConfig(HubertOnnxConfig): DEFAULT_ONNX_OPSET = 12 class UniSpeechOnnxConfig(HubertOnnxConfig): DEFAULT_ONNX_OPSET = 14 class UniSpeechSATOnnxConfig(HubertOnnxConfig): DEFAULT_ONNX_OPSET = 14 class WavLMOnnxConfig(HubertOnnxConfig): DEFAULT_ONNX_OPSET = 12 def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return WavLMModelPatcher(self, model, model_kwargs=model_kwargs) class ASTDummyAudioInputGenerator(DummyAudioInputGenerator): def generate(self, input_name: str, framework: str='pt', int_dtype: str='int64', float_dtype: str='fp32'): shape = [self.batch_size, self.normalized_config.max_length, self.normalized_config.num_mel_bins] if input_name == 'input_values': return self.random_float_tensor(shape, min_value=-1, max_value=1, framework=framework, dtype=float_dtype) return super().generate(input_name, framework=framework, int_dtype=int_dtype, float_dtype=float_dtype) class ASTOnnxConfig(OnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(num_mel_bins='num_mel_bins', max_length='max_length', allow_new=True) DUMMY_INPUT_GENERATOR_CLASSES = (ASTDummyAudioInputGenerator,) ATOL_FOR_VALIDATION = 0.0001 DEFAULT_ONNX_OPSET = 14 @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'input_values': {0: 'batch_size'}} class WhisperOnnxConfig(AudioToTextOnnxConfig): DEFAULT_ONNX_OPSET = 14 NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(encoder_num_layers='encoder_layers', decoder_num_layers='decoder_layers', feature_size='num_mel_bins', allow_new=True) ATOL_FOR_VALIDATION = 0.001 @property def inputs(self) -> 
Dict[str, Dict[int, str]]: if self.task == 'audio-classification': common_inputs = {'input_features': {0: 'batch_size'}} else: common_inputs = super().inputs if self._behavior is not ConfigBehavior.DECODER: common_inputs['input_features'] = {0: 'batch_size'} if self._behavior is not ConfigBehavior.ENCODER and self.use_past_in_inputs: if check_if_transformers_greater('4.43.0'): common_inputs['cache_position'] = {0: 'decoder_sequence_length'} if self._behavior is ConfigBehavior.DECODER and (not self.use_past_in_inputs): common_inputs['encoder_outputs'][1] = f"{common_inputs['encoder_outputs'][1]} / 2" return common_inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = super().outputs if self._behavior is ConfigBehavior.ENCODER: common_outputs['last_hidden_state'][1] = f"{common_outputs['last_hidden_state'][1]} / 2" return common_outputs class MusicgenOnnxConfig(OnnxSeq2SeqConfigWithPast): DEFAULT_ONNX_OPSET = 14 VARIANTS = {'text-conditional-with-past': 'Exports Musicgen to ONNX to generate audio samples conditioned on a text prompt (Reference: https://huggingface.co/docs/transformers/model_doc/musicgen#text-conditional-generation). This uses the decoder KV cache. The following subcomponents are exported:\n\t\t* text_encoder.onnx: corresponds to the text encoder part in https://github.com/huggingface/transformers/blob/v4.39.1/src/transformers/models/musicgen/modeling_musicgen.py#L1457.\n\t\t* encodec_decode.onnx: corresponds to the Encodec audio encoder part in https://github.com/huggingface/transformers/blob/v4.39.1/src/transformers/models/musicgen/modeling_musicgen.py#L2472-L2480.\n\t\t* decoder_model.onnx: The Musicgen decoder, without past key values input, and computing cross attention. Not required at inference (use decoder_model_merged.onnx instead).\n\t\t* decoder_with_past_model.onnx: The Musicgen decoder, with past_key_values input (KV cache filled), not computing cross attention. Not required at inference (use decoder_model_merged.onnx instead).\n\t\t* decoder_model_merged.onnx: The two previous models fused in one, to avoid duplicating weights. A boolean input `use_cache_branch` allows to select the branch to use. In the first forward pass where the KV cache is empty, dummy past key values inputs need to be passed and are ignored with use_cache_branch=False.\n\t\t* build_delay_pattern_mask.onnx: A model taking as input `input_ids`, `pad_token_id`, `max_length`, and building a delayed pattern mask to the input_ids. 
Implements https://github.com/huggingface/transformers/blob/v4.39.3/src/transformers/models/musicgen/modeling_musicgen.py#L1054.'} DEFAULT_VARIANT = 'text-conditional-with-past' NORMALIZED_CONFIG_CLASS = NormalizedEncoderDecoderConfig DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, DummyCodegenDecoderTextInputGenerator, DummySeq2SeqPastKeyValuesGenerator, DummyEncodecInputGenerator, DummyIntGenerator) DUMMY_PKV_GENERATOR_CLASS = DummySeq2SeqPastKeyValuesGenerator def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', use_past: bool=False, use_past_in_inputs: bool=False, behavior: ConfigBehavior=ConfigBehavior.ENCODER, preprocessors: Optional[List[Any]]=None, model_part: Optional[Literal['text_encoder', 'encodec_decode', 'decoder', 'build_delay_pattern_mask']]=None, legacy: bool=False, variant: str='text-conditional-with-past'): super().__init__(config=config, task=task, int_dtype=int_dtype, float_dtype=float_dtype, use_past=use_past, use_past_in_inputs=use_past_in_inputs, behavior=behavior, preprocessors=preprocessors, legacy=legacy) if legacy: raise ValueError('Musicgen does not support legacy=True.') if model_part in ['text_encoder', 'encodec_decode', 'build_delay_pattern_mask'] and behavior != ConfigBehavior.ENCODER: raise ValueError(f'model_part is {model_part} and behavior is {behavior}. This is not supported, please open an issue at https://github.com/huggingface/optimum/issues.') if model_part == 'decoder' and behavior != ConfigBehavior.DECODER: raise ValueError(f'model_part is {model_part} and behavior is {behavior}. This is not supported, please open an issue at https://github.com/huggingface/optimum/issues.') if behavior == ConfigBehavior.MONOLITH: raise ValueError('Musicgen does not support behavior=ConfigBehavior.MONOLITH. Please open an issue at https://github.com/huggingface/optimum/issues.') if config.audio_encoder.model_type != 'encodec': raise ValueError(f'Optimum ONNX export for Musicgen supports only Encodec as the audio encoder, got: {config.audio_encoder.model_type}. Please open an issue at https://github.com/huggingface/optimum/issues.') if config.audio_encoder.chunk_length_s is not None: raise ValueError(f'Musicgen ONNX export currently does not support audio_encoder.chunk_length_s not None (got {config.audio_encoder.chunk_length_s}). 
Please open an issue at https://github.com/huggingface/optimum/issues.') self.model_part = model_part if self.model_part == 'decoder': self.use_past = True self._normalized_config.ENCODER_NORMALIZED_CONFIG_CLASS = NormalizedTextConfig(self._config.text_encoder) self._normalized_config.DECODER_NORMALIZED_CONFIG_CLASS = NormalizedConfig(self._config.decoder) self._normalized_config.decoder_num_layers = self._config.decoder.num_hidden_layers self._normalized_config.DECODER_NORMALIZED_CONFIG_CLASS.num_layers = self._config.decoder.num_hidden_layers self._normalized_config.DECODER_NORMALIZED_CONFIG_CLASS.encoder_num_attention_heads = self._config.decoder.num_attention_heads self._normalized_config.DECODER_NORMALIZED_CONFIG_CLASS.decoder_num_attention_heads = self._config.decoder.num_attention_heads @property def inputs(self) -> Dict[str, Dict[int, str]]: if self.model_part == 'text_encoder': common_inputs = {'input_ids': {0: 'batch_size', 1: 'encoder_sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'encoder_sequence_length'}} elif self.model_part == 'encodec_decode': common_inputs = {'audio_codes': {1: 'batch_size', 3: 'chunk_length'}} elif self.model_part == 'build_delay_pattern_mask': common_inputs = {'input_ids': {0: 'batch_size_x_num_codebooks'}, 'pad_token_id': {}, 'max_length': {}} elif self._behavior is ConfigBehavior.DECODER: common_inputs = {'decoder_input_ids': {0: 'total_batch_size_x_num_codebooks'}, 'encoder_outputs': {0: 'total_batch_size', 1: 'encoder_sequence_length'}, 'attention_mask': {0: 'batch_size', 1: 'encoder_sequence_length'}} if self.use_past_in_inputs: self.add_past_key_values(common_inputs, direction='inputs') else: common_inputs['decoder_input_ids'] = {0: 'total_batch_size_x_num_codebooks', 1: 'decoder_sequence_length'} else: raise ValueError('This should not happen. Please open an issue at https://github.com/huggingface/optimum/issues.') return common_inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = {} if self.model_part == 'text_encoder': common_outputs = super().outputs elif self.model_part == 'encodec_decode': common_outputs['audio_values'] = {0: 'batch_size', 2: 'audio_length'} elif self.model_part == 'build_delay_pattern_mask': common_outputs['input_ids_edited'] = {0: 'total_batch_size_x_num_codebooks'} common_outputs['delay_pattern_mask'] = {0: 'total_batch_size_x_num_codebooks', 1: 'max_length'} elif self._behavior is ConfigBehavior.DECODER: common_outputs = super().outputs common_outputs = {'logits' if name == 'last_hidden_state' else name: value for (name, value) in common_outputs.items()} else: raise ValueError('This should not happen. 
Please open an issue at https://github.com/huggingface/optimum/issues.') return common_outputs def add_past_key_values(self, inputs_or_outputs: Dict[str, Dict[int, str]], direction: str): if direction not in ['inputs', 'outputs']: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') if direction == 'inputs': decoder_sequence_name = 'past_decoder_sequence_length' name = 'past_key_values' else: decoder_sequence_name = 'past_decoder_sequence_length + 1' name = 'present' for i in range(self._normalized_config.decoder_num_layers): inputs_or_outputs[f'{name}.{i}.decoder.key'] = {0: 'total_batch_size', 2: decoder_sequence_name} inputs_or_outputs[f'{name}.{i}.decoder.value'] = {0: 'total_batch_size', 2: decoder_sequence_name} if self.is_merged is True or (self._behavior is ConfigBehavior.DECODER and (not self.use_past_in_inputs)) or direction == 'inputs': inputs_or_outputs[f'{name}.{i}.encoder.key'] = {0: 'total_batch_size', 2: 'encoder_sequence_length_out'} inputs_or_outputs[f'{name}.{i}.encoder.value'] = {0: 'total_batch_size', 2: 'encoder_sequence_length_out'} def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return MusicgenModelPatcher(self, model, model_kwargs=model_kwargs) @property def torch_to_onnx_input_map(self) -> Dict[str, str]: if self._behavior is ConfigBehavior.DECODER: return {'decoder_input_ids': 'input_ids', 'encoder_outputs': 'encoder_hidden_states', 'attention_mask': 'encoder_attention_mask'} return {} def post_process_exported_models(self, path: Path, models_and_onnx_configs: Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel', 'ModelMixin'], 'OnnxConfig']], onnx_files_subpaths: List[str]): if 'with-past' in self.variant: decoder_path = Path(path, onnx_files_subpaths[2]) decoder_with_past_path = Path(path, onnx_files_subpaths[3]) decoder_merged_path = Path(path, ONNX_DECODER_MERGED_NAME + '.onnx') try: merge_decoders(decoder=decoder_path, decoder_with_past=decoder_with_past_path, save_path=decoder_merged_path, strict=False) except Exception as e: raise Exception(f'Unable to merge decoders. 
Detailed error: {e}') text_encoder_path = onnx_files_subpaths[0] encodec_decode_path = onnx_files_subpaths[1] build_delay_pattern_mask_path = onnx_files_subpaths[4] onnx_files_subpaths_new = [text_encoder_path, encodec_decode_path, decoder_merged_path.name, decoder_merged_path.name, build_delay_pattern_mask_path] models_and_onnx_configs[ONNX_DECODER_NAME][1].is_merged = True models_and_onnx_configs[ONNX_DECODER_NAME][1].use_cache_branch = False models_and_onnx_configs[ONNX_DECODER_NAME][1].use_past_in_inputs = True models_and_onnx_configs[ONNX_DECODER_WITH_PAST_NAME][1].use_cache_branch = True models_and_onnx_configs[ONNX_DECODER_WITH_PAST_NAME][1].is_merged = True else: onnx_files_subpaths_new = onnx_files_subpaths return (models_and_onnx_configs, onnx_files_subpaths_new) def overwrite_shape_and_generate_input(self, dummy_input_gen: 'DummyInputGenerator', input_name: str, framework: str, input_shapes: Dict): if self.model_part == 'build_delay_pattern_mask' and input_name == 'input_ids': original_batch_size = dummy_input_gen.batch_size dummy_input_gen.batch_size = original_batch_size * dummy_input_gen.normalized_config.DECODER_NORMALIZED_CONFIG_CLASS.num_codebooks dummy_input = dummy_input_gen.generate(input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype) dummy_input_gen.batch_size = original_batch_size else: dummy_input = super().overwrite_shape_and_generate_input(dummy_input_gen, input_name, framework, input_shapes) return dummy_input class SpeechT5OnnxConfig(OnnxSeq2SeqConfigWithPast): NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(hidden_size='hidden_size', num_attention_heads='encoder_attention_heads', encoder_num_layers='encoder_layers', decoder_num_layers='decoder_layers', allow_new=True) DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, DummySeq2SeqDecoderTextInputGenerator, DummySeq2SeqPastKeyValuesGenerator, DummySpeechT5InputGenerator) DUMMY_PKV_GENERATOR_CLASS = DummySeq2SeqPastKeyValuesGenerator VARIANTS = {'with-past': 'The export follows the Transformers implementation using the KV cache, with the following components exported:\n\t - encoder_model.onnx: corresponds to the encoding part in https://github.com/huggingface/transformers/blob/v4.33.2/src/transformers/models/speecht5/modeling_speecht5.py#L2544-L2556.\n\t - decoder_model.onnx: corresponds to the decoder part in https://github.com/huggingface/transformers/blob/v4.33.2/src/transformers/models/speecht5/modeling_speecht5.py#L2572-L2602.\n\t - decoder_with_past_model.onnx: same as the above, with past_key_values input (KV cache filled).\n\t - decoder_postnet_and_vocoder.onnx: Decoder speech postnet and vocoder (e.g. a SpeechT5HifiGan) to generate speech from the spectrogram, as in https://github.com/huggingface/transformers/blob/v4.33.2/src/transformers/models/speecht5/modeling_speecht5.py#L2605-L2614.', 'without-past': 'The same as `with-past`, just without KV cache support. 
This is not a recommended export as slower than `with-past`.'} DEFAULT_VARIANT = 'with-past' def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', use_past: bool=False, use_past_in_inputs: bool=False, behavior: ConfigBehavior=ConfigBehavior.MONOLITH, preprocessors: Optional[List[Any]]=None, is_postnet_and_vocoder: bool=False, legacy: bool=False): super().__init__(config=config, task=task, int_dtype=int_dtype, float_dtype=float_dtype, use_past=use_past, use_past_in_inputs=use_past_in_inputs, behavior=behavior, preprocessors=preprocessors, legacy=legacy) if float_dtype == 'fp16': raise ValueError('The ONNX export of SpeechT5 in float16 is currently not supported due to a bug in PyTorch: https://github.com/pytorch/pytorch/pull/110078. Please open an issue in Optimum if you would like to export SpeechT5 in float16.') self.is_postnet_and_vocoder = is_postnet_and_vocoder @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = {} if self._behavior is ConfigBehavior.ENCODER: common_inputs['input_ids'] = {1: 'encoder_sequence_length'} elif self._behavior is ConfigBehavior.DECODER: common_inputs['output_sequence'] = {1: 'decoder_sequence_length'} common_inputs['speaker_embeddings'] = {} common_inputs['encoder_outputs'] = {1: 'encoder_sequence_length'} common_inputs['encoder_attention_mask'] = {1: 'encoder_sequence_length'} if self.variant == 'with-past' and self.use_past_in_inputs: self.add_past_key_values(common_inputs, direction='inputs') elif self.is_postnet_and_vocoder: common_inputs['spectrogram'] = {0: 'n_spectrums x reduction_factor'} else: raise ValueError('self._behavior is neither encoder or decoder, and is_postnet_and_vocoder=False. This should not happen.') return common_inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = {} if self._behavior is ConfigBehavior.ENCODER: common_outputs['encoder_outputs'] = {1: 'encoder_sequence_length'} common_outputs['encoder_attention_mask'] = {1: 'encoder_sequence_length'} elif self._behavior is ConfigBehavior.DECODER: common_outputs['output_sequence_out'] = {1: 'decoder_sequence_length + 1'} common_outputs['spectrum'] = {} common_outputs['prob'] = {} if self.variant == 'with-past' and self.use_past: self.add_past_key_values(common_outputs, direction='outputs') elif self.is_postnet_and_vocoder: common_outputs['waveform'] = {0: 'n_samples'} else: raise ValueError('self._behavior is neither encoder or decoder, and is_postnet_and_vocoder=False. 
This should not happen.') return common_outputs def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return SpeechT5ModelPatcher(self, model, model_kwargs=model_kwargs) @property def torch_to_onnx_input_map(self) -> Dict[str, str]: return {'encoder_outputs': 'encoder_hidden_states'} def overwrite_shape_and_generate_input(self, dummy_input_gen: 'DummyInputGenerator', input_name: str, framework: str, input_shapes: Dict): dummy_input_gen.batch_size = 1 dummy_input = dummy_input_gen.generate(input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype) return dummy_input def add_past_key_values(self, inputs_or_outputs: Dict[str, Dict[int, str]], direction: str): if direction not in ['inputs', 'outputs']: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') if direction == 'inputs': decoder_sequence_name = 'past_decoder_sequence_length' name = 'past_key_values' else: decoder_sequence_name = 'past_decoder_sequence_length + 1' name = 'present' for i in range(self._normalized_config.decoder_num_layers): inputs_or_outputs[f'{name}.{i}.decoder.key'] = {2: decoder_sequence_name} inputs_or_outputs[f'{name}.{i}.decoder.value'] = {2: decoder_sequence_name} if self.is_merged is True or (self._behavior is ConfigBehavior.DECODER and (not self.use_past_in_inputs)) or direction == 'inputs': inputs_or_outputs[f'{name}.{i}.encoder.key'] = {2: 'encoder_sequence_length_out'} inputs_or_outputs[f'{name}.{i}.encoder.value'] = {2: 'encoder_sequence_length_out'} class VitsOnnxConfig(TextEncoderOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedTextConfig ATOL_FOR_VALIDATION = 0.0001 @property def inputs(self) -> Dict[str, Dict[int, str]]: return {'input_ids': {0: 'text_batch_size', 1: 'sequence_length'}, 'attention_mask': {0: 'text_batch_size', 1: 'sequence_length'}} @property def outputs(self) -> Dict[str, Dict[int, str]]: return {'waveform': {0: 'text_batch_size', 1: 'n_samples'}, 'spectrogram': {0: 'text_batch_size', 2: 'num_bins'}} class Speech2TextDummyAudioInputGenerator(DummyAudioInputGenerator): def generate(self, input_name: str, framework: str='pt', int_dtype: str='int64', float_dtype: str='fp32'): shape = [self.batch_size, self.sequence_length, self.normalized_config.input_features_per_channel] if input_name == 'input_features': return self.random_float_tensor(shape, min_value=-1, max_value=1, framework=framework, dtype=float_dtype) return super().generate(input_name, framework=framework) class Speech2TextOnnxConfig(AudioToTextOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(decoder_num_layers='decoder_layers', num_layers='decoder_layers', input_features_per_channel='input_feat_per_channel', allow_new=True) DUMMY_INPUT_GENERATOR_CLASSES = (Speech2TextDummyAudioInputGenerator,) + AudioToTextOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES[1:] + (DummyTextInputGenerator,) ATOL_FOR_VALIDATION = 0.0001 @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = {} if self._behavior is not ConfigBehavior.DECODER: common_inputs['input_features'] = {0: 'batch_size', 1: 'feature_size', 2: 'encoder_sequence_length'} common_inputs['attention_mask'] = {0: 'batch_size', 1: 'encoder_sequence_length'} if self._behavior is not ConfigBehavior.ENCODER: if self.use_past_in_inputs: common_inputs['decoder_input_ids'] = {0: 'batch_size'} else: common_inputs['decoder_input_ids'] = {0: 'batch_size', 1: 'decoder_sequence_length'} if 
self.use_past_in_inputs: self.add_past_key_values(common_inputs, direction='inputs') if self._behavior is ConfigBehavior.DECODER: common_inputs['encoder_outputs'] = {0: 'batch_size', 1: f'encoder_sequence_length / {2 * self._config.num_conv_layers}'} return common_inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = super().outputs if self._behavior is ConfigBehavior.ENCODER: common_outputs['last_hidden_state'][1] = f"{common_outputs['last_hidden_state'][1]} / {2 * self._config.num_conv_layers}" return common_outputs class TrOCROnnxConfig(TextSeq2SeqOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedSeq2SeqConfig.with_args(decoder_num_layers='decoder_layers', num_layers='decoder_layers', decoder_num_attention_heads='decoder_attention_heads', hidden_size='hidden_size') class VisionEncoderDecoderOnnxConfig(EncoderDecoderBaseOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedEncoderDecoderConfig ATOL_FOR_VALIDATION = 0.001 DUMMY_INPUT_GENERATOR_CLASSES = (DummyVisionInputGenerator, DummyVisionEncoderDecoderPastKeyValuesGenerator) @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = {} if self._behavior is not ConfigBehavior.DECODER: common_inputs['pixel_values'] = {0: 'batch_size', 1: 'num_channels', 2: 'height', 3: 'width'} if self._behavior is not ConfigBehavior.ENCODER: if self.use_past_in_inputs: common_inputs['decoder_input_ids'] = {0: 'batch_size'} else: common_inputs['decoder_input_ids'] = {0: 'batch_size', 1: 'decoder_sequence_length'} if self.use_past_in_inputs: self.add_past_key_values(common_inputs, direction='inputs') if self._behavior is ConfigBehavior.DECODER: common_inputs['encoder_outputs'] = {0: 'batch_size', 1: 'encoder_sequence_length'} return common_inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: if self._behavior == ConfigBehavior.ENCODER: return self._encoder_onnx_config.outputs else: return super().outputs def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return VisionEncoderDecoderPatcher(self, model, model_kwargs=model_kwargs) class SamOnnxConfig(OnnxConfig): MIN_TRANSFORMERS_VERSION = version.parse('4.29.0.dev0') MIN_TORCH_VERSION = version.parse('2.0.99') NORMALIZED_CONFIG_CLASS = NormalizedEncoderDecoderConfig DUMMY_INPUT_GENERATOR_CLASSES = (DummyVisionInputGenerator, DummyPointsGenerator, DummyVisionEmbeddingsGenerator) DEFAULT_ONNX_OPSET = 13 VARIANTS = {'monolith': 'All the SAM model components are exported as a single model.onnx.', 'split': 'The vision encoder is exported as a separate vision_encoder.onnx, and the prompt encoder and mask decoder are exported as a prompt_encoder_mask_decoder.onnx. 
This allows to encoder the image only once for multiple point queries.'} DEFAULT_VARIANT = 'split' def __init__(self, config: 'PretrainedConfig', task: str='feature-extraction', int_dtype: str='int64', float_dtype: str='fp32', variant: str='split', vision_encoder: Optional[bool]=None, preprocessors: Optional[List[Any]]=None, legacy: bool=False): super().__init__(config=config, task=task, int_dtype=int_dtype, float_dtype=float_dtype, preprocessors=preprocessors, legacy=legacy) self.variant = variant self.vision_encoder = vision_encoder self._normalized_config.ENCODER_NORMALIZED_CONFIG_CLASS = NormalizedVisionConfig(self._config.vision_config) @property def inputs(self) -> Dict[str, Dict[int, str]]: if self.variant == 'monolith': inputs = {'pixel_values': {0: 'batch_size'}, 'input_points': {0: 'batch_size', 1: 'point_batch_size', 2: 'nb_points_per_image'}, 'input_labels': {0: 'batch_size', 1: 'point_batch_size', 2: 'nb_points_per_image'}} elif self.vision_encoder: inputs = {'pixel_values': {0: 'batch_size'}} else: inputs = {'image_positional_embeddings': {0: 'batch_size'}, 'image_embeddings': {0: 'batch_size'}, 'input_points': {0: 'batch_size', 1: 'point_batch_size', 2: 'nb_points_per_image'}, 'input_labels': {0: 'batch_size', 1: 'point_batch_size', 2: 'nb_points_per_image'}} return inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: if self.variant == 'split' and self.vision_encoder: return {'image_embeddings': {0: 'batch_size'}, 'image_positional_embeddings': {0: 'batch_size'}} else: return {'iou_scores': {0: 'batch_size', 1: 'point_batch_size'}, 'pred_masks': {0: 'batch_size', 1: 'point_batch_size'}} def patch_model_for_export(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None) -> 'ModelPatcher': return SAMModelPatcher(self, model, model_kwargs=model_kwargs) class Pix2StructNormalizedConfig(NormalizedSeq2SeqConfig): ENCODER_NUM_LAYERS = 'vision_config.num_hidden_layers' DECODER_NUM_LAYERS = 'text_config.num_layers' ENCODER_NUM_ATTENTION_HEADS = 'vision_config.num_attention_heads' DECODER_NUM_ATTENTION_HEADS = 'text_config.num_heads' HIDDEN_SIZE = 'text_config.hidden_size' VOCAB_SIZE = 'text_config.vocab_size' class Pix2StructOnnxConfig(OnnxSeq2SeqConfigWithPast): NORMALIZED_CONFIG_CLASS = Pix2StructNormalizedConfig DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, DummySeq2SeqDecoderTextInputGenerator, DummySeq2SeqPastKeyValuesGenerator, DummyPix2StructInputGenerator) DEFAULT_ONNX_OPSET = 12 @property def inputs(self): common_inputs = {} common_inputs['attention_mask'] = {0: 'batch_size'} if self._behavior is not ConfigBehavior.DECODER: common_inputs['flattened_patches'] = {0: 'batch_size'} if self._behavior is not ConfigBehavior.ENCODER: if self.use_past_in_inputs: common_inputs['decoder_input_ids'] = {0: 'batch_size'} else: common_inputs['decoder_input_ids'] = {0: 'batch_size', 1: 'decoder_sequence_length'} if self._behavior is ConfigBehavior.DECODER: if self.use_past_in_inputs: self.add_past_key_values(common_inputs, direction='inputs') common_inputs['encoder_outputs'] = {0: 'batch_size'} common_inputs['decoder_attention_mask'] = {0: 'batch_size', 1: 'past_sequence_length + 1'} return common_inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: if self._behavior is ConfigBehavior.ENCODER: common_outputs = {'last_hidden_state': {0: 'batch_size'}} else: common_outputs = super(OnnxConfigWithPast, self).outputs for (name, axes_names) in common_outputs.items(): if self._behavior is ConfigBehavior.ENCODER or 
'encoder' in name: sequence_name = 'encoder_sequence_length' else: sequence_name = 'decoder_sequence_length' new_axes_names = {} for (axis_idx, axis_name) in axes_names.items(): if 'sequence' in axis_name: if self.use_past_in_inputs is False or self.is_merged is True: new_axes_names[axis_idx] = sequence_name else: new_axes_names[axis_idx] = '1' else: new_axes_names[axis_idx] = axis_name common_outputs[name] = new_axes_names if self.use_past: self.add_past_key_values(common_outputs, direction='outputs') return common_outputs @property def torch_to_onnx_input_map(self) -> Dict[str, str]: if self._behavior is ConfigBehavior.DECODER: return {'decoder_input_ids': 'input_ids', 'encoder_outputs': 'encoder_hidden_states', 'attention_mask': 'encoder_attention_mask'} return {} def generate_dummy_inputs_for_validation(self, reference_model_inputs: Dict[str, Any], onnx_input_names: Optional[List[str]]=None) -> Dict[str, Any]: if self._behavior is ConfigBehavior.DECODER: reference_model_inputs['input_ids'] = reference_model_inputs.pop('decoder_input_ids') if onnx_input_names is not None: if 'encoder_outputs' in reference_model_inputs: if 'encoder_hidden_states' in onnx_input_names: reference_model_inputs['encoder_hidden_states'] = reference_model_inputs.pop('encoder_outputs')[0] else: reference_model_inputs.pop('encoder_outputs') else: reference_model_inputs['encoder_hidden_states'] = reference_model_inputs.pop('encoder_outputs')[0] return super().generate_dummy_inputs_for_validation(reference_model_inputs) def _create_dummy_input_generator_classes(self, **kwargs) -> List['DummyInputGenerator']: dummy_inputs_generators = [] dummy_inputs_generators.append(self.DUMMY_INPUT_GENERATOR_CLASSES[0](self.task, self._normalized_config)) if self._preprocessors is None or len(self._preprocessors) != 2: raise ValueError(f'Preprocessors for pix2struct need to be available for the ONNX export to infer input static shapes. Got: {self._preprocessors}') encoder_sequence_length = self._preprocessors[1].image_processor.max_patches kwargs['preprocessors'] = self._preprocessors for cls_ in self.DUMMY_INPUT_GENERATOR_CLASSES[1:]: dummy_inputs_generators.append(cls_(self.task, self._normalized_config, encoder_sequence_length=encoder_sequence_length, **kwargs)) return dummy_inputs_generators def overwrite_shape_and_generate_input(self, dummy_input_gen: 'DummyInputGenerator', input_name: str, framework: str, input_shapes: Dict): if self._preprocessors is None or len(self._preprocessors) != 2: raise ValueError(f'Preprocessors for pix2struct need to be available for the ONNX export to infer input static shapes. 
Got: {self._preprocessors}') if self.use_past and self.use_past_in_inputs and (self.use_cache_branch is not False) and (input_name in ['decoder_input_ids', 'input_ids']): sequence_length = dummy_input_gen.sequence_length dummy_input_gen.sequence_length = 1 dummy_input = dummy_input_gen.generate(input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype) dummy_input_gen.sequence_length = sequence_length elif input_name in ['encoder_outputs', 'attention_mask']: original_seq_length = dummy_input_gen.sequence_length dummy_input_gen.sequence_length = self._preprocessors[1].image_processor.max_patches dummy_input = dummy_input_gen.generate(input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype) dummy_input_gen.sequence_length = original_seq_length else: dummy_input = dummy_input_gen.generate(input_name, framework=framework, int_dtype=self.int_dtype, float_dtype=self.float_dtype) return dummy_input class EncoderDecoderOnnxConfig(EncoderDecoderBaseOnnxConfig): NORMALIZED_CONFIG_CLASS = NormalizedEncoderDecoderConfig # File: optimum-main/optimum/exporters/onnx/model_patcher.py import dataclasses import functools import inspect import math import sys import types from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import transformers from packaging import version from transformers.models.speecht5.modeling_speecht5 import SpeechT5EncoderWithSpeechPrenet from transformers.utils import is_torch_available if is_torch_available(): import torch from ...configuration_utils import _transformers_version from ...utils import logging if _transformers_version > version.parse('4.34.99'): from transformers.modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_causal_attention_mask if _transformers_version >= version.parse('4.36'): from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask_for_sdpa else: _prepare_4d_causal_attention_mask = None _prepare_4d_causal_attention_mask_for_sdpa = None AttentionMaskConverter = None if _transformers_version >= version.parse('4.42'): from transformers.cache_utils import SlidingWindowCache, StaticCache if TYPE_CHECKING: from transformers import PreTrainedModel, TFPreTrainedModel from .base import OnnxConfig logger = logging.get_logger(__name__) def patch_everywhere(attribute_name: str, patch: Any, module_name_prefix: Optional[str]=None): for name in list(sys.modules): module = sys.modules[name] if module_name_prefix is not None and (not name.startswith(module_name_prefix)): continue if hasattr(module, attribute_name): setattr(module, attribute_name, patch) def override_arguments(args, kwargs, forward_signature, model_kwargs: Dict[str, Any]): args = list(args) for argument in model_kwargs: if argument in forward_signature.parameters: argument_index = list(forward_signature.parameters.keys()).index(argument) if argument in kwargs or len(args) <= argument_index: kwargs[argument] = model_kwargs[argument] else: args[argument_index] = model_kwargs[argument] else: kwargs[argument] = model_kwargs[argument] return (args, kwargs) @dataclasses.dataclass class PatchingSpec: o: Any name: str custom_op: Callable orig_op: Optional[Callable] = None op_wrapper: Optional[Callable] = None class ModelPatcher: def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None): self._model = model patching_specs = config.PATCHING_SPECS self._patching_specs = [] for spec in patching_specs if 
patching_specs is not None else []: final_spec = spec if spec.orig_op is None: final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name)) self._patching_specs.append(final_spec) self.orig_forward_name = 'forward' if hasattr(self._model, 'forward') else 'call' self.orig_forward = getattr(self._model, self.orig_forward_name) self.model_kwargs = model_kwargs if model_kwargs is not None else {} if config.__class__.__name__ == 'OnnxConfigWithLoss': self.real_config = config._onnx_config else: self.real_config = config allow_past_in_outputs = hasattr(self.real_config, 'use_past') and self.real_config.use_past @functools.wraps(self.orig_forward) def patched_forward(*args, **kwargs): signature = inspect.signature(self.orig_forward) (args, kwargs) = override_arguments(args, kwargs, signature, model_kwargs=self.model_kwargs) outputs = self.orig_forward(*args, **kwargs) filterd_outputs = {} if isinstance(outputs, dict): for (name, value) in outputs.items(): onnx_output_name = config.torch_to_onnx_output_map.get(name, name) if onnx_output_name in config.outputs or (allow_past_in_outputs and name.startswith('past_key_values')) or any((key.startswith(onnx_output_name) for key in config.outputs.keys())): filterd_outputs[name] = value elif isinstance(outputs, (list, tuple)): outputs_list = list(config.outputs.keys()) dict(zip(outputs_list, outputs)) else: if len(config.outputs) > 1: num_outputs = len(config.outputs) outputs_str = ', '.join(config.outputs.keys()) raise ValueError(f'config.outputs should have only one outputs, but it has {num_outputs} keys: {outputs_str}') else: name = list(config.outputs.keys())[0] filterd_outputs[name] = outputs name = list(config.outputs.keys())[0] filterd_outputs[name] = outputs return filterd_outputs self.patched_forward = patched_forward def patch_ops(self): for spec in self._patching_specs: custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op) setattr(spec.o, spec.name, custom_op) def restore_ops(self): for spec in self._patching_specs: orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op) setattr(spec.o, spec.name, orig_op) def __enter__(self): self.patch_ops() setattr(self._model, self.orig_forward_name, self.patched_forward) def __exit__(self, exc_type, exc_value, traceback): self.restore_ops() setattr(self._model, self.orig_forward_name, self.orig_forward) def __call__(self, *args, **kwargs): if getattr(self._model, self.orig_forward_name) is self.orig_forward: logger.warning('Running the non-patched model') return self._model(*args, **kwargs) class Seq2SeqModelPatcher(ModelPatcher): def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None): super().__init__(config, model, model_kwargs) allow_past_in_outputs = hasattr(self.real_config, 'use_past') and self.real_config.use_past if model.config.model_type == 'pix2struct' and allow_past_in_outputs: model.config.text_config.use_cache = True @functools.wraps(self.orig_forward) def patched_forward(*args, **kwargs): signature = inspect.signature(self.orig_forward) (args, kwargs) = override_arguments(args, kwargs, signature, model_kwargs=self.model_kwargs) outputs = self.orig_forward(*args, **kwargs) filterd_outputs = {} for (name, value) in outputs.items(): onnx_output_name = config.torch_to_onnx_output_map.get(name, name) if onnx_output_name in config.outputs or (allow_past_in_outputs and name.startswith('past_key_values')) or 
any((key.startswith(onnx_output_name) for key in config.outputs.keys())): if name != 'past_key_values': if self.real_config._behavior == 'decoder' and name == 'encoder_last_hidden_state': continue else: filterd_outputs[name] = value elif self.real_config._behavior == 'monolith' or (self.real_config._behavior == 'decoder' and (self.real_config.is_merged or not self.real_config.use_past_in_inputs)): filterd_outputs[name] = value elif self.real_config._behavior == 'decoder' and self.real_config.use_past_in_inputs: filterd_outputs[name] = tuple([v[:2] for v in value]) return filterd_outputs self.patched_forward = patched_forward class VisionEncoderDecoderPatcher(Seq2SeqModelPatcher): def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None): super().__init__(config, model, model_kwargs) use_cache = hasattr(self.real_config, 'use_past') if config._behavior == 'decoder' and model.config.decoder.model_type == 'trocr' and use_cache: model.decoder.model.decoder.config.use_cache = True def _unmask_unattended_patched_legacy(expanded_mask: torch.Tensor, attention_mask: torch.Tensor, unmasked_value: Union[bool, float]): return expanded_mask def _unmask_unattended_patched(expanded_mask: torch.Tensor, min_dtype: float): return expanded_mask def _make_causal_mask_patched(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int=0, sliding_window: Optional[int]=None): (bsz, tgt_len) = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) if sliding_window is not None: diagonal = past_key_values_length - sliding_window + 1 context_mask = 1 - torch.triu(torch.ones_like(mask, dtype=torch.int64), diagonal=diagonal) mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) _make_causal_mask_patched_staticmethod = staticmethod(_make_causal_mask_patched) if _transformers_version >= version.parse('4.39.0'): _unmask_unattended_patched_staticmethod = staticmethod(_unmask_unattended_patched) else: _unmask_unattended_patched_staticmethod = staticmethod(_unmask_unattended_patched_legacy) def _prepare_4d_causal_attention_mask_for_sdpa_patched(attention_mask: Optional[torch.Tensor], input_shape: Union[torch.Size, Tuple, List], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int]=None): attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) key_value_length = input_shape[-1] + past_key_values_length if attention_mask is not None: attention_mask = attn_mask_converter.to_4d(attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype) else: attention_mask = attn_mask_converter.to_causal_4d(input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device) return attention_mask class DecoderModelPatcher(ModelPatcher): def __enter__(self): super().__enter__() if AttentionMaskConverter is not None: AttentionMaskConverter._make_causal_mask = _make_causal_mask_patched_staticmethod if _transformers_version >= version.parse('4.36'): 
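# (transformers >= 4.36 also ships SDPA-specific mask helpers; the body below swaps them out as well: _unmask_unattended becomes a passthrough, and the patched _prepare_4d_causal_attention_mask_for_sdpa always materializes an explicit 4D mask instead of possibly returning None.)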
AttentionMaskConverter._unmask_unattended = _unmask_unattended_patched_staticmethod if _transformers_version >= version.parse('4.36'): patch_everywhere('_prepare_4d_causal_attention_mask_for_sdpa', _prepare_4d_causal_attention_mask_for_sdpa_patched) def __exit__(self, exc_type, exc_value, traceback): super().__exit__(exc_type, exc_value, traceback) if AttentionMaskConverter is not None: AttentionMaskConverter._make_causal_mask = staticmethod(self.original_make_causal) if _transformers_version >= version.parse('4.36'): AttentionMaskConverter._unmask_unattended = staticmethod(self.original_unmask_unattended) if _transformers_version >= version.parse('4.36'): patch_everywhere('_prepare_4d_causal_attention_mask_for_sdpa', self.original_prepare_4d_causal_attention_mask_for_sdpa) def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None): super().__init__(config, model, model_kwargs) if _transformers_version >= version.parse('4.36'): self.original_prepare_4d_causal_attention_mask_for_sdpa = _prepare_4d_causal_attention_mask_for_sdpa self.original_unmask_unattended = AttentionMaskConverter._unmask_unattended if AttentionMaskConverter is not None: self.original_make_causal = AttentionMaskConverter._make_causal_mask def falcon_build_alibi_tensor_patched(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: (batch_size, seq_length) = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor(2 ** (-2 ** (-(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32) powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor(2 ** (-2 ** (-(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None] * arange_tensor return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) class FalconModelPatcher(DecoderModelPatcher): def __enter__(self): super().__enter__() self.patch_ops() if self.real_config.task == 'text-generation': patch_everywhere('build_alibi_tensor', falcon_build_alibi_tensor_patched, module_name_prefix='transformers.models.falcon.modeling_falcon') def __exit__(self, exc_type, exc_value, traceback): super().__exit__(exc_type, exc_value, traceback) self.restore_ops() setattr(self._model, self.orig_forward_name, self.orig_forward) if self.real_config.task == 'text-generation': patch_everywhere('build_alibi_tensor', self.build_alibi_tensor_original, module_name_prefix='transformers.models.falcon.modeling_falcon') def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None): super().__init__(config, model, model_kwargs) self.build_alibi_tensor_original = transformers.models.falcon.modeling_falcon.build_alibi_tensor class WavLMModelPatcher(ModelPatcher): def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None): 
super().__init__(config, model, model_kwargs) allow_past_in_outputs = hasattr(self.real_config, 'use_past') and self.real_config.use_past @functools.wraps(self.orig_forward) def patched_forward(*args, **kwargs): model_kwargs = self.model_kwargs model_kwargs['output_attentions'] = True signature = inspect.signature(self.orig_forward) (args, kwargs) = override_arguments(args, kwargs, signature, model_kwargs=model_kwargs) outputs = self.orig_forward(*args, **kwargs) filterd_outputs = {} for (name, value) in outputs.items(): onnx_output_name = config.torch_to_onnx_output_map.get(name, name) if onnx_output_name in config.outputs or (allow_past_in_outputs and name.startswith('past_key_values')) or any((key.startswith(onnx_output_name) for key in config.outputs.keys())): filterd_outputs[name] = value return filterd_outputs self.patched_forward = patched_forward class SAMModelPatcher(ModelPatcher): def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None): super().__init__(config, model, model_kwargs) def patched_forward(pixel_values=None, input_points=None, input_labels=None, image_embeddings=None, image_positional_embeddings=None, return_dict=True, **kwargs): if config.variant == 'monolith': return self.orig_forward(pixel_values=pixel_values, input_points=input_points, input_labels=input_labels, image_embeddings=image_embeddings, return_dict=return_dict, **kwargs) elif config.variant == 'split': if config.vision_encoder: image_positional_embeddings = model.get_image_wide_positional_embeddings() batch_size = pixel_values.shape[0] image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1) vision_outputs = model.vision_encoder(pixel_values, output_attentions=False, output_hidden_states=False, return_dict=return_dict) image_embeddings = vision_outputs[0] if not return_dict: return (image_embeddings, image_positional_embeddings) else: return {'image_embeddings': image_embeddings, 'image_positional_embeddings': image_positional_embeddings} else: if input_points is None: raise ValueError('input_points is required to export the prompt encoder / mask decoder.') (sparse_embeddings, dense_embeddings) = model.prompt_encoder(input_points=input_points, input_labels=input_labels, input_boxes=None, input_masks=None) (low_res_masks, iou_predictions, _) = model.mask_decoder(image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=True, attention_similarity=None, target_embedding=None, output_attentions=False) if not return_dict: return (iou_predictions, low_res_masks) else: return {'iou_scores': iou_predictions, 'pred_masks': low_res_masks} self.patched_forward = patched_forward def patched_speecht5_prenet_forward(self, input_values: torch.Tensor, speaker_embeddings: Optional[torch.Tensor]=None): inputs_embeds = input_values for layer in self.layers: inputs_embeds = torch.nn.functional.relu(layer(inputs_embeds)) mask = torch.rand(inputs_embeds.shape, device=inputs_embeds.device) > self.config.speech_decoder_prenet_dropout inputs_embeds = inputs_embeds * mask / (1 - self.config.speech_decoder_prenet_dropout) inputs_embeds = self.final_layer(inputs_embeds) inputs_embeds = self.encode_positions(inputs_embeds) if speaker_embeddings is not None: speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings) speaker_embeddings = speaker_embeddings.unsqueeze(1) 
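# Editor's note (added comment, not in the original source): the patched prenet above replaces
# SpeechT5's always-on prenet dropout with an explicit mask (torch.rand(...) > p, rescaled by
# 1 / (1 - p)), presumably so the randomness stays expressed as plain tensor ops that trace
# cleanly during ONNX export. The lines that follow broadcast the L2-normalized speaker
# embedding over the time dimension before it is concatenated with the prenet hidden states.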
speaker_embeddings = speaker_embeddings.expand(-1, inputs_embeds.size(1), -1) inputs_embeds = torch.cat([inputs_embeds, speaker_embeddings], dim=-1) inputs_embeds = torch.nn.functional.relu(self.speaker_embeds_layer(inputs_embeds)) return inputs_embeds class SpeechT5ModelPatcher(ModelPatcher): def __enter__(self): self.patch_ops() self._model.speecht5.decoder.prenet.forward = types.MethodType(patched_speecht5_prenet_forward, self._model.speecht5.decoder.prenet) setattr(self._model, self.orig_forward_name, self.patched_forward) def __exit__(self, exc_type, exc_value, traceback): self.restore_ops() setattr(self._model, self.orig_forward_name, self.orig_forward) self._model.speecht5.decoder.prenet.forward = types.MethodType(self.original_speecht5_prenet_forward, self._model.speecht5.decoder.prenet) def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Dict[str, Any]): super().__init__(config, model, model_kwargs) self.original_speecht5_prenet_forward = model.speecht5.decoder.prenet.forward model.vocoder = model_kwargs['vocoder_model'].eval() def patched_forward(input_ids=None, speaker_embeddings=None, encoder_outputs=None, past_key_values=None, output_sequence=None, spectrogram=None, encoder_attention_mask=None): use_cache = self.real_config.use_past and self.real_config.variant == 'with-past' if self.real_config._behavior == 'encoder': encoder_attention_mask = torch.ones_like(input_ids) encoder_out = model.speecht5.encoder(input_values=input_ids, attention_mask=encoder_attention_mask, return_dict=True) if isinstance(model.speecht5.encoder, SpeechT5EncoderWithSpeechPrenet): encoder_attention_mask = model.speecht5.encoder.prenet._get_feature_vector_attention_mask(encoder_out[0].shape[1], encoder_attention_mask) result = {'encoder_outputs': encoder_out.last_hidden_state, 'encoder_attention_mask': encoder_attention_mask} elif self.real_config._behavior == 'decoder': encoder_hidden_states = encoder_outputs[0] decoder_hidden_states = model.speecht5.decoder.prenet(output_sequence, speaker_embeddings) decoder_out = model.speecht5.decoder.wrapped_decoder(hidden_states=decoder_hidden_states[:, -1:], attention_mask=None, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=False, return_dict=True) last_decoder_output = decoder_out.last_hidden_state[0, -1] past_key_values = decoder_out.past_key_values spectrum = model.speech_decoder_postnet.feat_out(last_decoder_output) spectrum = spectrum.view(model.config.reduction_factor, model.config.num_mel_bins) output_sequence = torch.cat((output_sequence, spectrum[-1].view(1, 1, model.config.num_mel_bins)), dim=1) prob = torch.sigmoid(model.speech_decoder_postnet.prob_out(last_decoder_output)) result = {'output_sequence_out': output_sequence, 'spectrum': spectrum, 'prob': prob, 'past_key_values': past_key_values} elif self.real_config.is_postnet_and_vocoder: spectrogram = spectrogram.unsqueeze(0) spectrogram = model.speech_decoder_postnet.postnet(spectrogram) spectrogram = spectrogram.squeeze(0) waveform = model.vocoder(spectrogram) result = {'waveform': waveform} else: raise ValueError('Should not happen') filterd_outputs = {} for (name, value) in result.items(): if name != 'past_key_values': filterd_outputs[name] = value elif self.real_config._behavior == 'decoder' and (self.real_config.is_merged or not self.real_config.use_past_in_inputs): filterd_outputs[name] = value elif 
self.real_config._behavior == 'decoder' and self.real_config.use_past_in_inputs: filterd_outputs[name] = tuple([v[:2] for v in value]) return filterd_outputs self.patched_forward = patched_forward class SentenceTransformersTransformerPatcher(ModelPatcher): def __enter__(self): super().__enter__() if _transformers_version >= version.parse('4.42') and self.real_config._config.model_type == 'mistral': self._model[0].auto_model._update_causal_mask = types.MethodType(_update_causal_mask_patched, self._model[0].auto_model) def __exit__(self, exc_type, exc_value, traceback): super().__exit__(exc_type, exc_value, traceback) if _transformers_version >= version.parse('4.42') and self.real_config._config.model_type == 'mistral': self._model[0].auto_model._update_causal_mask = types.MethodType(self._update_causal_mask_original, self._model[0].auto_model) def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Dict[str, Any]): super().__init__(config, model, model_kwargs) if _transformers_version >= version.parse('4.42') and self.real_config._config.model_type == 'mistral': self._update_causal_mask_original = self._model[0].auto_model._update_causal_mask def patched_forward(input_ids, attention_mask): result = self.orig_forward({'input_ids': input_ids, 'attention_mask': attention_mask}) if 'input_ids' in result: del result['input_ids'] if 'attention_mask' in result: del result['attention_mask'] if 'all_layer_embeddings' in result: del result['all_layer_embeddings'] return result self.patched_forward = patched_forward class SentenceTransformersCLIPPatcher(ModelPatcher): def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Dict[str, Any]): super().__init__(config, model, model_kwargs) def patched_forward(input_ids, attention_mask, pixel_values): vision_outputs = model[0].model.vision_model(pixel_values=pixel_values) image_embeds = model[0].model.visual_projection(vision_outputs[1]) text_outputs = model[0].model.text_model(input_ids=input_ids, attention_mask=attention_mask) text_embeds = model[0].model.text_projection(text_outputs[1]) if len(model) > 1: image_embeds = model[1:](image_embeds) text_embeds = model[1:](text_embeds) return {'text_embeds': text_embeds, 'image_embeds': image_embeds} self.patched_forward = patched_forward def triu_onnx(x, diagonal=0): (l, w) = x.shape arange_rows = torch.arange(l, device=x.device) arange_cols = torch.arange(w, device=x.device) mask = arange_cols.expand(l, w) arange_rows = arange_rows[:, None] + diagonal mask = mask >= arange_rows return x.masked_fill(mask == 0, 0) def patched_build_delay_pattern_mask(self, input_ids: torch.Tensor, pad_token_id: int, max_length: int=None): input_ids = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) (bsz, num_codebooks, seq_len) = input_ids.shape max_length = max_length if max_length is not None else self.generation_config.max_length input_ids_shifted = torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1 channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks if max_length < 2 * channel_codebooks - 1: raise NotImplementedError('Not supported in ONNX export. 
Please open an issue in Optimum repository.') for codebook in range(channel_codebooks): if self.config.audio_channels == 1: input_ids_shifted[:, codebook, codebook:seq_len + codebook] = input_ids[:, codebook] else: input_ids_shifted[:, 2 * codebook, codebook:seq_len + codebook] = input_ids[:, 2 * codebook] input_ids_shifted[:, 2 * codebook + 1, codebook:seq_len + codebook] = input_ids[:, 2 * codebook + 1] delay_pattern = triu_onnx(torch.ones((channel_codebooks, max_length), dtype=torch.int32), diagonal=max_length - channel_codebooks + 1) delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.int64)) delay_pattern = delay_pattern.to(torch.bool) if self.config.audio_channels == 2: delay_pattern = delay_pattern.repeat_interleave(2, dim=0) mask = ~delay_pattern.to(input_ids.device) input_ids = mask * input_ids_shifted + ~mask * pad_token_id first_codebook_ids = input_ids[:, 0, :] start_ids = (first_codebook_ids == -1).nonzero()[:, 1] first_start_id = start_ids.min() pattern_mask = input_ids.reshape(bsz * num_codebooks, -1) input_ids_edited = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1) return {'input_ids_edited': input_ids_edited, 'delay_pattern_mask': pattern_mask} class MusicgenModelPatcher(Seq2SeqModelPatcher): def __enter__(self): self.patch_ops() if self.real_config.model_part == 'build_delay_pattern_mask': self._model.forward = types.MethodType(patched_build_delay_pattern_mask, self._model) else: setattr(self._model, self.orig_forward_name, self.patched_forward) def __exit__(self, exc_type, exc_value, traceback): self.restore_ops() if self.real_config.model_part == 'build_delay_pattern_mask': self._model.forward = self.original_decoder_forward else: setattr(self._model, self.orig_forward_name, self.orig_forward) def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None): super().__init__(config, model, model_kwargs) if config.model_part == 'build_delay_pattern_mask': self.original_decoder_forward = self.orig_forward elif config.model_part == 'encodec_decode': @functools.wraps(self.orig_forward) def patched_forward(input_values: Optional['torch.Tensor']=None, padding_mask: Optional['torch.Tensor']=None, audio_codes: Optional['torch.Tensor']=None, bandwidth: Optional[float]=None, audio_scales: Optional['torch.Tensor']=None, return_dict: Optional[bool]=None): chunk_length = self.real_config._config.audio_encoder.chunk_length if chunk_length is None: if audio_scales is not None: audio_scales = audio_scales[0] if len(audio_codes) != 1: raise ValueError(f'Expected one frame, got {len(audio_codes)}') audio_values = self._model._decode_frame(audio_codes[0], audio_scales) else: raise ValueError('Not supported, a meaningful error should have been raised ahead.') decoded_frames = [] for (frame, scale) in zip(audio_codes, audio_scales): frames = self._model._decode_frame(frame, scale) decoded_frames.append(frames) audio_values = self._model._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1) if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]: audio_values = audio_values[..., :padding_mask.shape[-1]] return {'audio_values': audio_values} self.patched_forward = patched_forward def _update_causal_mask_patched(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values, use_cache: bool, output_attentions: bool): if self._attn_implementation == 'flash_attention_2': if 
attention_mask is not None and use_cache: is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0] if is_padding_right: raise ValueError("You are attempting to perform batched generation with padding_side='right' this may lead to unexpected behaviour for Flash Attention version of Mistral. Make sure to call `tokenizer.padding_side = 'left'` before tokenizing the input. ") if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = cache_position[0] if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache) if self.config._attn_implementation == 'sdpa' and (not (using_static_cache or using_sliding_window_cache)) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, sliding_window=self.config.sliding_window, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_sliding_window_cache: target_length = max(sequence_length, self.config.sliding_window) elif using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 if attention_mask is not None and attention_mask.dim() == 4: if attention_mask.max() != 0: raise ValueError('Custom 4D attention mask should be passed in inverted form with max==0`') causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) exclude_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) if self.config.sliding_window is not None: if not using_sliding_window_cache or sequence_length > self.config.sliding_window: exclude_mask = torch.bitwise_or(exclude_mask, torch.arange(target_length, device=device) <= cache_position.reshape(-1, 1) - self.config.sliding_window) causal_mask *= exclude_mask causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() if attention_mask.dim() == 2: mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask class MistralModelPatcher(ModelPatcher): def __enter__(self): super().__enter__() if AttentionMaskConverter is not None: AttentionMaskConverter._make_causal_mask = _make_causal_mask_patched_staticmethod if _transformers_version >= version.parse('4.36'): AttentionMaskConverter._unmask_unattended = _unmask_unattended_patched_staticmethod if _transformers_version >= version.parse('4.36'): patch_everywhere('_prepare_4d_causal_attention_mask_for_sdpa', _prepare_4d_causal_attention_mask_for_sdpa_patched) if _transformers_version >= version.parse('4.42'): if hasattr(self._model, 'model'): self._model.model._update_causal_mask = types.MethodType(_update_causal_mask_patched, self._model.model) else: self._model._update_causal_mask = types.MethodType(_update_causal_mask_patched, self._model) def __exit__(self, exc_type, exc_value, traceback): 
super().__exit__(exc_type, exc_value, traceback) if AttentionMaskConverter is not None: AttentionMaskConverter._make_causal_mask = staticmethod(self.original_make_causal) if _transformers_version >= version.parse('4.36'): AttentionMaskConverter._unmask_unattended = staticmethod(self.original_unmask_unattended) if _transformers_version >= version.parse('4.36'): patch_everywhere('_prepare_4d_causal_attention_mask_for_sdpa', self.original_prepare_4d_causal_attention_mask_for_sdpa) if _transformers_version >= version.parse('4.42'): if hasattr(self._model, 'model'): self._model.model._update_causal_mask = types.MethodType(self._update_causal_mask_original, self._model.model) else: self._model._update_causal_mask = types.MethodType(self._update_causal_mask_original, self._model) def __init__(self, config: 'OnnxConfig', model: Union['PreTrainedModel', 'TFPreTrainedModel'], model_kwargs: Optional[Dict[str, Any]]=None): super().__init__(config, model, model_kwargs) if _transformers_version >= version.parse('4.36'): self.original_prepare_4d_causal_attention_mask_for_sdpa = _prepare_4d_causal_attention_mask_for_sdpa self.original_unmask_unattended = AttentionMaskConverter._unmask_unattended if AttentionMaskConverter is not None: self.original_make_causal = AttentionMaskConverter._make_causal_mask if _transformers_version >= version.parse('4.42'): if hasattr(self._model, 'model'): self._update_causal_mask_original = self._model.model._update_causal_mask else: self._update_causal_mask_original = self._model._update_causal_mask class CLIPModelPatcher(ModelPatcher): def __enter__(self): super().__enter__() if _transformers_version >= version.parse('4.43'): from transformers.models.clip.modeling_clip import CLIPAttention, CLIPSdpaAttention (self.original_sdpa_forward, CLIPSdpaAttention.forward) = (CLIPSdpaAttention.forward, CLIPAttention.forward) def __exit__(self, exc_type, exc_value, traceback): super().__exit__(exc_type, exc_value, traceback) if _transformers_version >= version.parse('4.43'): from transformers.models.clip.modeling_clip import CLIPSdpaAttention CLIPSdpaAttention.forward = self.original_sdpa_forward # File: optimum-main/optimum/exporters/onnx/utils.py """""" from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from transformers.utils import is_tf_available, is_torch_available from ...utils import DIFFUSERS_MINIMUM_VERSION, ORT_QUANTIZE_MINIMUM_VERSION, check_if_diffusers_greater, is_diffusers_available, logging from ...utils.import_utils import _diffusers_version from ..utils import _get_submodels_and_export_configs from ..utils import get_decoder_models_for_export as _get_decoder_models_for_export from ..utils import get_diffusion_models_for_export as _get_diffusion_models_for_export from ..utils import get_encoder_decoder_models_for_export as _get_encoder_decoder_models_for_export from ..utils import get_sam_models_for_export as _get_sam_models_for_export from ..utils import get_speecht5_models_for_export as _get_speecht5_models_for_export logger = logging.get_logger() if is_diffusers_available(): if not check_if_diffusers_greater(DIFFUSERS_MINIMUM_VERSION.base_version): raise ImportError(f'We found an older version of diffusers {_diffusers_version} but we require diffusers to be >= {DIFFUSERS_MINIMUM_VERSION}. 
Please update diffusers by running `pip install --upgrade diffusers`') if TYPE_CHECKING: from ..base import ExportConfig if is_torch_available(): from transformers.modeling_utils import PreTrainedModel if is_tf_available(): from transformers.modeling_tf_utils import TFPreTrainedModel if is_diffusers_available(): from diffusers import DiffusionPipeline, ModelMixin MODEL_TYPES_REQUIRING_POSITION_IDS = {'codegen', 'falcon', 'gemma', 'gpt2', 'gpt-bigcode', 'gpt-neo', 'gpt-neox', 'gptj', 'imagegpt', 'llama', 'mistral', 'phi', 'phi3', 'qwen2'} def check_onnxruntime_requirements(minimum_version: version.Version): try: import onnxruntime except ImportError: raise ImportError("ONNX Runtime doesn't seem to be currently installed. Please install ONNX Runtime by running `pip install onnxruntime` and relaunch the conversion.") ort_version = version.parse(onnxruntime.__version__) if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: raise ImportError(f'We found an older version of ONNX Runtime ({onnxruntime.__version__}) but we require the version to be >= {minimum_version} to enable all the conversions options.\nPlease update ONNX Runtime by running `pip install --upgrade onnxruntime`') def recursive_to_device(value: Union[Tuple, List, 'torch.Tensor'], device: str): if isinstance(value, tuple): value = list(value) for (i, val) in enumerate(value): value[i] = recursive_to_device(val, device) value = tuple(value) elif isinstance(value, list): for (i, val) in enumerate(value): value[i] = recursive_to_device(val, device) elif isinstance(value, torch.Tensor): value = value.to(device) return value def recursive_to_dtype(value: Union[Tuple, List, 'torch.Tensor'], dtype: Optional[torch.dtype], start_dtype: Optional[torch.dtype]=None): if dtype is None: return value if isinstance(value, tuple): value = list(value) for (i, val) in enumerate(value): value[i] = recursive_to_dtype(val, dtype) value = tuple(value) elif isinstance(value, list): for (i, val) in enumerate(value): value[i] = recursive_to_dtype(val, dtype) elif isinstance(value, torch.Tensor): if start_dtype is None or (start_dtype is not None and value.dtype == start_dtype): value = value.to(dtype=dtype) return value class PickableInferenceSession: def __init__(self, model_path, sess_options, providers): import onnxruntime as ort self.model_path = model_path self.sess_options = sess_options self.providers = providers self.sess = ort.InferenceSession(self.model_path, sess_options=sess_options, providers=providers) def run(self, *args): return self.sess.run(*args) def get_outputs(self): return self.sess.get_outputs() def get_inputs(self): return self.sess.get_inputs() def __getstate__(self): return {'model_path': self.model_path} def __setstate__(self, values): import onnxruntime as ort self.model_path = values['model_path'] self.sess = ort.InferenceSession(self.model_path, sess_options=self.sess_options, providers=self.providers) def _get_submodels_and_onnx_configs(model: Union['PreTrainedModel', 'TFPreTrainedModel'], task: str, monolith: bool, custom_onnx_configs: Dict, custom_architecture: bool, _variant: str, library_name: str, int_dtype: str='int64', float_dtype: str='fp32', fn_get_submodels: Optional[Callable]=None, preprocessors: Optional[List[Any]]=None, legacy: bool=False, model_kwargs: Optional[Dict]=None): return _get_submodels_and_export_configs(model, task, monolith, custom_onnx_configs, custom_architecture, _variant, library_name, int_dtype, float_dtype, fn_get_submodels, preprocessors, legacy, model_kwargs, exporter='onnx') 
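# Editor's note (added comment, illustrative only): recursive_to_device / recursive_to_dtype
# above walk arbitrarily nested tuples and lists of tensors (e.g. past_key_values-style
# structures) and return the same nesting with every tensor moved or cast. A minimal usage
# sketch, assuming only that torch is installed:
#
#     past = ((torch.ones(1, 2), torch.ones(1, 2)), [torch.zeros(3)])
#     past_cpu = recursive_to_device(past, 'cpu')
#     past_fp16 = recursive_to_dtype(past, torch.float16, start_dtype=torch.float32)
#     # only tensors that were float32 are cast; the nesting is preserved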
DEPRECATION_WARNING_GET_MODEL_FOR_EXPORT = 'The usage of `optimum.exporters.onnx.utils.get_{model_type}_models_for_export` is deprecated and will be removed in a future release, please use `optimum.exporters.utils.get_{model_type}_models_for_export` instead.' def get_diffusion_models_for_export(pipeline: 'DiffusionPipeline', int_dtype: str='int64', float_dtype: str='fp32') -> Dict[str, Tuple[Union['PreTrainedModel', 'ModelMixin'], 'ExportConfig']]: logger.warning(DEPRECATION_WARNING_GET_MODEL_FOR_EXPORT.format(model_type='diffusion')) return _get_diffusion_models_for_export(pipeline, int_dtype, float_dtype, exporter='onnx') def get_sam_models_for_export(model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: 'ExportConfig'): logger.warning(DEPRECATION_WARNING_GET_MODEL_FOR_EXPORT.format(model_type='sam')) return _get_sam_models_for_export(model, config) def get_speecht5_models_for_export(model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: 'ExportConfig', model_kwargs: Optional[Dict]): logger.warning(DEPRECATION_WARNING_GET_MODEL_FOR_EXPORT.format(model_type='speecht5')) return _get_speecht5_models_for_export(model, config, model_kwargs) def get_encoder_decoder_models_for_export(model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: 'ExportConfig') -> Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel'], 'ExportConfig']]: logger.warning(DEPRECATION_WARNING_GET_MODEL_FOR_EXPORT.format(model_type='encoder_decoder')) return _get_encoder_decoder_models_for_export(model, config) def get_decoder_models_for_export(model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: 'ExportConfig', legacy: bool=False) -> Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel'], 'ExportConfig']]: logger.warning(DEPRECATION_WARNING_GET_MODEL_FOR_EXPORT.format(model_type='decoder')) return _get_decoder_models_for_export(model, config, legacy) # File: optimum-main/optimum/exporters/tasks.py """""" import importlib import os import warnings from functools import partial from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union import huggingface_hub from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from huggingface_hub.errors import OfflineModeIsEnabled from packaging import version from requests.exceptions import ConnectionError as RequestsConnectionError from transformers import AutoConfig, PretrainedConfig, is_tf_available, is_torch_available from transformers.utils import SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, WEIGHTS_NAME, logging from ..utils.import_utils import is_diffusers_available, is_onnx_available if TYPE_CHECKING: from .base import ExportConfig logger = logging.get_logger(__name__) if not is_torch_available() and (not is_tf_available()): logger.warning('The export tasks are only supported for PyTorch or TensorFlow. 
You will not be able to export models without one of these libraries installed.') if is_torch_available(): import torch from transformers import PreTrainedModel if is_tf_available(): from transformers import TFPreTrainedModel if is_diffusers_available(): from diffusers import DiffusionPipeline from diffusers.pipelines.auto_pipeline import AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, AUTO_INPAINT_PIPELINES_MAPPING, AUTO_TEXT2IMAGE_PIPELINES_MAPPING ExportConfigConstructor = Callable[[PretrainedConfig], 'ExportConfig'] TaskNameToExportConfigDict = Dict[str, ExportConfigConstructor] def is_backend_available(backend): backend_availability = {'onnx': is_onnx_available(), 'tflite': is_tf_available()} return backend_availability[backend] def make_backend_config_constructor_for_task(config_cls: Type, task: str) -> ExportConfigConstructor: if '-with-past' in task: if not getattr(config_cls, 'SUPPORTS_PAST', False): raise ValueError(f'{config_cls} does not support tasks with past.') constructor = partial(config_cls, use_past=True, task=task.replace('-with-past', '')) else: constructor = partial(config_cls, task=task) return constructor def supported_tasks_mapping(*supported_tasks: Union[str, Tuple[str, Tuple[str, ...]]], **exporters: str) -> Dict[str, TaskNameToExportConfigDict]: mapping = {} for (backend, config_cls_name) in exporters.items(): if is_backend_available(backend): config_cls = getattr(importlib.import_module(f'optimum.exporters.{backend}.model_configs'), config_cls_name) mapping[backend] = {} for task in supported_tasks: if isinstance(task, tuple): (task, supported_backends_for_task) = task if backend not in supported_backends_for_task: continue config_constructor = make_backend_config_constructor_for_task(config_cls, task) mapping[backend][task] = config_constructor return mapping def get_diffusers_tasks_to_model_mapping(): tasks_to_model_mapping = {} for (task_name, model_mapping) in (('text-to-image', AUTO_TEXT2IMAGE_PIPELINES_MAPPING), ('image-to-image', AUTO_IMAGE2IMAGE_PIPELINES_MAPPING), ('inpainting', AUTO_INPAINT_PIPELINES_MAPPING)): tasks_to_model_mapping[task_name] = {} for (model_type, model_class) in model_mapping.items(): tasks_to_model_mapping[task_name][model_type] = model_class.__name__ return tasks_to_model_mapping def get_transformers_tasks_to_model_mapping(tasks_to_model_loader, framework='pt'): if framework == 'pt': auto_modeling_module = importlib.import_module('transformers.models.auto.modeling_auto') elif framework == 'tf': auto_modeling_module = importlib.import_module('transformers.models.auto.modeling_tf_auto') tasks_to_model_mapping = {} for (task_name, model_loaders) in tasks_to_model_loader.items(): if isinstance(model_loaders, str): model_loaders = (model_loaders,) tasks_to_model_mapping[task_name] = {} for model_loader in model_loaders: model_loader_class = getattr(auto_modeling_module, model_loader, None) if model_loader_class is not None: tasks_to_model_mapping[task_name].update(model_loader_class._model_mapping._model_mapping) return tasks_to_model_mapping class TasksManager: _TRANSFORMERS_TASKS_TO_MODEL_LOADERS = {} _DIFFUSERS_TASKS_TO_MODEL_LOADERS = {} _TIMM_TASKS_TO_MODEL_LOADERS = {} _LIBRARY_TO_TASKS_TO_MODEL_LOADER_MAP = {} _TRANSFORMERS_TASKS_TO_MODEL_MAPPINGS = {} _DIFFUSERS_TASKS_TO_MODEL_MAPPINGS = {} _TRANSFORMERS_TASKS_TO_TF_MODEL_LOADERS = {} _LIBRARY_TO_TF_TASKS_TO_MODEL_LOADER_MAP = {} _TRANSFORMERS_TASKS_TO_TF_MODEL_MAPPINGS = {} if is_torch_available(): _TRANSFORMERS_TASKS_TO_MODEL_LOADERS = {'audio-classification': 
'AutoModelForAudioClassification', 'audio-frame-classification': 'AutoModelForAudioFrameClassification', 'audio-xvector': 'AutoModelForAudioXVector', 'automatic-speech-recognition': ('AutoModelForSpeechSeq2Seq', 'AutoModelForCTC'), 'depth-estimation': 'AutoModelForDepthEstimation', 'feature-extraction': 'AutoModel', 'fill-mask': 'AutoModelForMaskedLM', 'image-classification': 'AutoModelForImageClassification', 'image-segmentation': ('AutoModelForImageSegmentation', 'AutoModelForSemanticSegmentation'), 'image-to-image': 'AutoModelForImageToImage', 'image-to-text': 'AutoModelForVision2Seq', 'mask-generation': 'AutoModel', 'masked-im': 'AutoModelForMaskedImageModeling', 'multiple-choice': 'AutoModelForMultipleChoice', 'object-detection': 'AutoModelForObjectDetection', 'question-answering': 'AutoModelForQuestionAnswering', 'semantic-segmentation': 'AutoModelForSemanticSegmentation', 'text-to-audio': ('AutoModelForTextToSpectrogram', 'AutoModelForTextToWaveform'), 'text-generation': 'AutoModelForCausalLM', 'text2text-generation': 'AutoModelForSeq2SeqLM', 'text-classification': 'AutoModelForSequenceClassification', 'token-classification': 'AutoModelForTokenClassification', 'zero-shot-image-classification': 'AutoModelForZeroShotImageClassification', 'zero-shot-object-detection': 'AutoModelForZeroShotObjectDetection'} _TRANSFORMERS_TASKS_TO_MODEL_MAPPINGS = get_transformers_tasks_to_model_mapping(_TRANSFORMERS_TASKS_TO_MODEL_LOADERS, framework='pt') _TIMM_TASKS_TO_MODEL_LOADERS = {'image-classification': 'create_model'} _SENTENCE_TRANSFORMERS_TASKS_TO_MODEL_LOADERS = {'feature-extraction': 'SentenceTransformer', 'sentence-similarity': 'SentenceTransformer'} if is_diffusers_available(): _DIFFUSERS_TASKS_TO_MODEL_LOADERS = {'image-to-image': 'AutoPipelineForImage2Image', 'inpainting': 'AutoPipelineForInpainting', 'text-to-image': 'AutoPipelineForText2Image'} _DIFFUSERS_TASKS_TO_MODEL_MAPPINGS = get_diffusers_tasks_to_model_mapping() _LIBRARY_TO_TASKS_TO_MODEL_LOADER_MAP = {'diffusers': _DIFFUSERS_TASKS_TO_MODEL_LOADERS, 'sentence_transformers': _SENTENCE_TRANSFORMERS_TASKS_TO_MODEL_LOADERS, 'timm': _TIMM_TASKS_TO_MODEL_LOADERS, 'transformers': _TRANSFORMERS_TASKS_TO_MODEL_LOADERS} if is_tf_available(): _TRANSFORMERS_TASKS_TO_TF_MODEL_LOADERS = {'document-question-answering': 'TFAutoModelForDocumentQuestionAnswering', 'feature-extraction': 'TFAutoModel', 'fill-mask': 'TFAutoModelForMaskedLM', 'text-generation': 'TFAutoModelForCausalLM', 'image-classification': 'TFAutoModelForImageClassification', 'text2text-generation': 'TFAutoModelForSeq2SeqLM', 'text-classification': 'TFAutoModelForSequenceClassification', 'token-classification': 'TFAutoModelForTokenClassification', 'multiple-choice': 'TFAutoModelForMultipleChoice', 'question-answering': 'TFAutoModelForQuestionAnswering', 'image-segmentation': 'TFAutoModelForImageSegmentation', 'masked-im': 'TFAutoModelForMaskedImageModeling', 'semantic-segmentation': 'TFAutoModelForSemanticSegmentation', 'automatic-speech-recognition': 'TFAutoModelForSpeechSeq2Seq', 'audio-classification': 'TFAutoModelForAudioClassification', 'image-to-text': 'TFAutoModelForVision2Seq', 'zero-shot-image-classification': 'TFAutoModelForZeroShotImageClassification', 'zero-shot-object-detection': 'TFAutoModelForZeroShotObjectDetection'} _LIBRARY_TO_TF_TASKS_TO_MODEL_LOADER_MAP = {'transformers': _TRANSFORMERS_TASKS_TO_TF_MODEL_LOADERS} _TRANSFORMERS_TASKS_TO_TF_MODEL_MAPPINGS = get_transformers_tasks_to_model_mapping(_TRANSFORMERS_TASKS_TO_TF_MODEL_LOADERS, framework='tf') 
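# Editor's note (added comment, illustrative only): the task -> auto-class maps defined above
# are what TasksManager.get_model_class_for_task() (defined further below in this file)
# resolves against. A sketch, assuming transformers with PyTorch is installed:
#
#     cls = TasksManager.get_model_class_for_task('text-classification', framework='pt')
#     # cls is transformers.AutoModelForSequenceClassification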
_SYNONYM_TASK_MAP = {'audio-ctc': 'automatic-speech-recognition', 'causal-lm': 'text-generation', 'causal-lm-with-past': 'text-generation-with-past', 'default': 'feature-extraction', 'default-with-past': 'feature-extraction-with-past', 'masked-lm': 'fill-mask', 'mask-generation': 'feature-extraction', 'sentence-similarity': 'feature-extraction', 'seq2seq-lm': 'text2text-generation', 'seq2seq-lm-with-past': 'text2text-generation-with-past', 'sequence-classification': 'text-classification', 'speech2seq-lm': 'automatic-speech-recognition', 'speech2seq-lm-with-past': 'automatic-speech-recognition-with-past', 'summarization': 'text2text-generation', 'text-to-speech': 'text-to-audio', 'translation': 'text2text-generation', 'vision2seq-lm': 'image-to-text', 'zero-shot-classification': 'text-classification', 'image-feature-extraction': 'feature-extraction', 'lcm': 'text-to-image', 'stable-diffusion': 'text-to-image', 'stable-diffusion-xl': 'text-to-image'} _CUSTOM_CLASSES = {('pt', 'pix2struct', 'image-to-text'): ('transformers', 'Pix2StructForConditionalGeneration'), ('pt', 'pix2struct', 'visual-question-answering'): ('transformers', 'Pix2StructForConditionalGeneration'), ('pt', 'visual-bert', 'question-answering'): ('transformers', 'VisualBertForQuestionAnswering'), ('pt', 'vision-encoder-decoder', 'document-question-answering'): ('transformers', 'VisionEncoderDecoderModel')} _ENCODER_DECODER_TASKS = ('automatic-speech-recognition', 'document-question-answering', 'feature-extraction-with-past', 'image-to-text', 'text2text-generation', 'visual-question-answering') _MODEL_TYPE_FOR_DEFAULT_CONFIG = {'timm': 'default-timm-config'} _DIFFUSERS_SUPPORTED_MODEL_TYPE = {'clip-text-model': supported_tasks_mapping('feature-extraction', onnx='CLIPTextOnnxConfig'), 'clip-text-with-projection': supported_tasks_mapping('feature-extraction', onnx='CLIPTextWithProjectionOnnxConfig'), 'unet': supported_tasks_mapping('semantic-segmentation', onnx='UNetOnnxConfig'), 'vae-encoder': supported_tasks_mapping('semantic-segmentation', onnx='VaeEncoderOnnxConfig'), 'vae-decoder': supported_tasks_mapping('semantic-segmentation', onnx='VaeDecoderOnnxConfig')} _TIMM_SUPPORTED_MODEL_TYPE = {'default-timm-config': supported_tasks_mapping('image-classification', onnx='TimmDefaultOnnxConfig')} _SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE = {'clip': supported_tasks_mapping('feature-extraction', 'sentence-similarity', onnx='SentenceTransformersCLIPOnnxConfig'), 'transformer': supported_tasks_mapping('feature-extraction', 'sentence-similarity', onnx='SentenceTransformersTransformerOnnxConfig')} _SUPPORTED_MODEL_TYPE = {'audio-spectrogram-transformer': supported_tasks_mapping('feature-extraction', 'audio-classification', onnx='ASTOnnxConfig'), 'albert': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='AlbertOnnxConfig', tflite='AlbertTFLiteConfig'), 'bart': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text2text-generation', 'text2text-generation-with-past', 'text-classification', 'question-answering', onnx='BartOnnxConfig'), 'beit': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='BeitOnnxConfig'), 'bert': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='BertOnnxConfig', tflite='BertTFLiteConfig'), 'blenderbot': 
supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text2text-generation', 'text2text-generation-with-past', onnx='BlenderbotOnnxConfig'), 'blenderbot-small': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text2text-generation', 'text2text-generation-with-past', onnx='BlenderbotSmallOnnxConfig'), 'bloom': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', 'token-classification', onnx='BloomOnnxConfig'), 'camembert': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='CamembertOnnxConfig', tflite='CamembertTFLiteConfig'), 'clip': supported_tasks_mapping('feature-extraction', 'zero-shot-image-classification', onnx='CLIPOnnxConfig'), 'clip-vision-model': supported_tasks_mapping('feature-extraction', onnx='CLIPVisionModelOnnxConfig'), 'codegen': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', onnx='CodeGenOnnxConfig'), 'convbert': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='ConvBertOnnxConfig', tflite='ConvBertTFLiteConfig'), 'convnext': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='ConvNextOnnxConfig'), 'convnextv2': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='ConvNextV2OnnxConfig'), 'cvt': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='CvTOnnxConfig'), 'data2vec-text': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='Data2VecTextOnnxConfig'), 'data2vec-vision': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='Data2VecVisionOnnxConfig'), 'data2vec-audio': supported_tasks_mapping('feature-extraction', 'automatic-speech-recognition', 'audio-classification', 'audio-frame-classification', 'audio-xvector', onnx='Data2VecAudioOnnxConfig'), 'deberta': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'token-classification', 'question-answering', onnx='DebertaOnnxConfig', tflite='DebertaTFLiteConfig'), 'deberta-v2': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', ('multiple-choice', ('onnx',)), 'token-classification', 'question-answering', onnx='DebertaV2OnnxConfig', tflite='DebertaV2TFLiteConfig'), 'deit': supported_tasks_mapping('feature-extraction', 'image-classification', 'masked-im', onnx='DeiTOnnxConfig'), 'detr': supported_tasks_mapping('feature-extraction', 'object-detection', 'image-segmentation', onnx='DetrOnnxConfig'), 'distilbert': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='DistilBertOnnxConfig', tflite='DistilBertTFLiteConfig'), 'donut': supported_tasks_mapping('image-to-text', 'image-to-text-with-past', 'document-question-answering', 'document-question-answering-with-past', onnx='VisionEncoderDecoderOnnxConfig'), 'donut-swin': supported_tasks_mapping('feature-extraction', onnx='DonutSwinOnnxConfig'), 'dpt': supported_tasks_mapping('feature-extraction', 
'depth-estimation', 'image-segmentation', 'semantic-segmentation', onnx='DptOnnxConfig'), 'electra': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='ElectraOnnxConfig', tflite='ElectraTFLiteConfig'), 'encoder-decoder': supported_tasks_mapping('text2text-generation', 'text2text-generation-with-past', onnx='EncoderDecoderOnnxConfig'), 'esm': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'token-classification', onnx='EsmOnnxConfig'), 'falcon': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'question-answering', 'text-generation', 'text-generation-with-past', 'token-classification', onnx='FalconOnnxConfig'), 'flaubert': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='FlaubertOnnxConfig', tflite='FlaubertTFLiteConfig'), 'gemma': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', onnx='GemmaOnnxConfig'), 'glpn': supported_tasks_mapping('feature-extraction', 'depth-estimation', onnx='GlpnOnnxConfig'), 'gpt2': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', 'token-classification', onnx='GPT2OnnxConfig'), 'gpt-bigcode': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', 'token-classification', onnx='GPTBigCodeOnnxConfig'), 'gptj': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'question-answering', 'text-classification', onnx='GPTJOnnxConfig'), 'gpt-neo': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', onnx='GPTNeoOnnxConfig'), 'gpt-neox': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', onnx='GPTNeoXOnnxConfig'), 'groupvit': supported_tasks_mapping('feature-extraction', onnx='GroupViTOnnxConfig'), 'hubert': supported_tasks_mapping('feature-extraction', 'automatic-speech-recognition', 'audio-classification', onnx='HubertOnnxConfig'), 'ibert': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='IBertOnnxConfig'), 'imagegpt': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='ImageGPTOnnxConfig'), 'layoutlm': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'token-classification', onnx='LayoutLMOnnxConfig'), 'layoutlmv3': supported_tasks_mapping('feature-extraction', 'question-answering', 'text-classification', 'token-classification', onnx='LayoutLMv3OnnxConfig'), 'lilt': supported_tasks_mapping('feature-extraction', 'question-answering', 'text-classification', 'token-classification', onnx='LiltOnnxConfig'), 'levit': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='LevitOnnxConfig'), 'longt5': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text2text-generation', 'text2text-generation-with-past', onnx='LongT5OnnxConfig'), 'marian': 
supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text2text-generation', 'text2text-generation-with-past', 'text-generation', 'text-generation-with-past', onnx='MarianOnnxConfig'), 'markuplm': supported_tasks_mapping('feature-extraction', 'text-classification', 'token-classification', 'question-answering', onnx='MarkupLMOnnxConfig'), 'mbart': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text2text-generation', 'text2text-generation-with-past', 'text-classification', 'question-answering', onnx='MBartOnnxConfig'), 'mistral': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', onnx='MistralOnnxConfig'), 'mobilebert': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='MobileBertOnnxConfig', tflite='MobileBertTFLiteConfig'), 'mobilevit': supported_tasks_mapping('feature-extraction', 'image-classification', 'image-segmentation', onnx='MobileViTOnnxConfig'), 'mobilenet-v1': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='MobileNetV1OnnxConfig'), 'mobilenet-v2': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='MobileNetV2OnnxConfig'), 'mpnet': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='MPNetOnnxConfig', tflite='MPNetTFLiteConfig'), 'mpt': supported_tasks_mapping('text-generation', 'text-generation-with-past', 'text-classification', onnx='MPTOnnxConfig'), 'mt5': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text2text-generation', 'text2text-generation-with-past', onnx='MT5OnnxConfig'), 'musicgen': supported_tasks_mapping('text-to-audio', onnx='MusicgenOnnxConfig'), 'm2m-100': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text2text-generation', 'text2text-generation-with-past', onnx='M2M100OnnxConfig'), 'nystromformer': supported_tasks_mapping('feature-extraction', 'fill-mask', 'multiple-choice', 'question-answering', 'text-classification', 'token-classification', onnx='NystromformerOnnxConfig'), 'owlv2': supported_tasks_mapping('feature-extraction', 'zero-shot-object-detection', onnx='OwlV2OnnxConfig'), 'owlvit': supported_tasks_mapping('feature-extraction', 'zero-shot-object-detection', onnx='OwlViTOnnxConfig'), 'opt': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'question-answering', 'text-classification', onnx='OPTOnnxConfig'), 'qwen2': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', onnx='Qwen2OnnxConfig'), 'llama': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', onnx='LlamaOnnxConfig'), 'pegasus': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text2text-generation', 'text2text-generation-with-past', onnx='PegasusOnnxConfig'), 'perceiver': supported_tasks_mapping('fill-mask', 'image-classification', 'text-classification', onnx='PerceiverOnnxConfig'), 'phi': supported_tasks_mapping('feature-extraction', 
'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', onnx='PhiOnnxConfig'), 'phi3': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text-generation', 'text-generation-with-past', 'text-classification', onnx='Phi3OnnxConfig'), 'pix2struct': supported_tasks_mapping('image-to-text', 'image-to-text-with-past', 'visual-question-answering', 'visual-question-answering-with-past', onnx='Pix2StructOnnxConfig'), 'poolformer': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='PoolFormerOnnxConfig'), 'regnet': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='RegNetOnnxConfig'), 'resnet': supported_tasks_mapping('feature-extraction', 'image-classification', onnx='ResNetOnnxConfig', tflite='ResNetTFLiteConfig'), 'roberta': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='RobertaOnnxConfig', tflite='RobertaTFLiteConfig'), 'roformer': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'token-classification', 'multiple-choice', 'question-answering', 'token-classification', onnx='RoFormerOnnxConfig', tflite='RoFormerTFLiteConfig'), 'sam': supported_tasks_mapping('feature-extraction', onnx='SamOnnxConfig'), 'segformer': supported_tasks_mapping('feature-extraction', 'image-classification', 'image-segmentation', 'semantic-segmentation', onnx='SegformerOnnxConfig'), 'sew': supported_tasks_mapping('feature-extraction', 'automatic-speech-recognition', 'audio-classification', onnx='SEWOnnxConfig'), 'sew-d': supported_tasks_mapping('feature-extraction', 'automatic-speech-recognition', 'audio-classification', onnx='SEWDOnnxConfig'), 'speech-to-text': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'automatic-speech-recognition', 'automatic-speech-recognition-with-past', onnx='Speech2TextOnnxConfig'), 'speecht5': supported_tasks_mapping('text-to-audio', onnx='SpeechT5OnnxConfig'), 'splinter': supported_tasks_mapping('feature-extraction', 'question-answering', onnx='SplinterOnnxConfig'), 'squeezebert': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='SqueezeBertOnnxConfig'), 'swin': supported_tasks_mapping('feature-extraction', 'image-classification', 'masked-im', onnx='SwinOnnxConfig'), 'swin2sr': supported_tasks_mapping('feature-extraction', 'image-to-image', onnx='Swin2srOnnxConfig'), 't5': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'text2text-generation', 'text2text-generation-with-past', onnx='T5OnnxConfig'), 'table-transformer': supported_tasks_mapping('feature-extraction', 'object-detection', onnx='TableTransformerOnnxConfig'), 'trocr': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'image-to-text', 'image-to-text-with-past', onnx='TrOCROnnxConfig'), 'unispeech': supported_tasks_mapping('feature-extraction', 'automatic-speech-recognition', 'audio-classification', onnx='UniSpeechOnnxConfig'), 'unispeech-sat': supported_tasks_mapping('feature-extraction', 'automatic-speech-recognition', 'audio-classification', 'audio-frame-classification', 'audio-xvector', onnx='UniSpeechSATOnnxConfig'), 'vision-encoder-decoder': supported_tasks_mapping('image-to-text', 'image-to-text-with-past', 'document-question-answering', 'document-question-answering-with-past', 
onnx='VisionEncoderDecoderOnnxConfig'), 'vit': supported_tasks_mapping('feature-extraction', 'image-classification', 'masked-im', onnx='ViTOnnxConfig'), 'vits': supported_tasks_mapping('text-to-audio', onnx='VitsOnnxConfig'), 'wavlm': supported_tasks_mapping('feature-extraction', 'automatic-speech-recognition', 'audio-classification', 'audio-frame-classification', 'audio-xvector', onnx='WavLMOnnxConfig'), 'wav2vec2': supported_tasks_mapping('feature-extraction', 'automatic-speech-recognition', 'audio-classification', 'audio-frame-classification', 'audio-xvector', onnx='Wav2Vec2OnnxConfig'), 'wav2vec2-conformer': supported_tasks_mapping('feature-extraction', 'automatic-speech-recognition', 'audio-classification', 'audio-frame-classification', 'audio-xvector', onnx='Wav2Vec2ConformerOnnxConfig'), 'whisper': supported_tasks_mapping('feature-extraction', 'feature-extraction-with-past', 'audio-classification', 'automatic-speech-recognition', 'automatic-speech-recognition-with-past', onnx='WhisperOnnxConfig'), 'xlm': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='XLMOnnxConfig', tflite='XLMTFLiteConfig'), 'xlm-roberta': supported_tasks_mapping('feature-extraction', 'fill-mask', 'text-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx='XLMRobertaOnnxConfig', tflite='XLMRobertaTFLiteConfig'), 'yolos': supported_tasks_mapping('feature-extraction', 'object-detection', onnx='YolosOnnxConfig')} _LIBRARY_TO_SUPPORTED_MODEL_TYPES = {'diffusers': _DIFFUSERS_SUPPORTED_MODEL_TYPE, 'sentence_transformers': _SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE, 'timm': _TIMM_SUPPORTED_MODEL_TYPE, 'transformers': _SUPPORTED_MODEL_TYPE} _UNSUPPORTED_CLI_MODEL_TYPE = {'unet', 'vae-encoder', 'vae-decoder', 'clip-text-model', 'clip-text-with-projection', 'trocr'} _SUPPORTED_CLI_MODEL_TYPE = (set(_SUPPORTED_MODEL_TYPE.keys()) | set(_DIFFUSERS_SUPPORTED_MODEL_TYPE.keys()) | set(_TIMM_SUPPORTED_MODEL_TYPE.keys()) | set(_SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE.keys())) - _UNSUPPORTED_CLI_MODEL_TYPE @classmethod def create_register(cls, backend: str, overwrite_existing: bool=False) -> Callable[[str, Tuple[str, ...]], Callable[[Type], Type]]: def wrapper(model_type: str, *supported_tasks: str, library_name: str='transformers') -> Callable[[Type], Type]: def decorator(config_cls: Type) -> Type: supported_model_type_for_library = TasksManager._LIBRARY_TO_SUPPORTED_MODEL_TYPES[library_name] mapping = supported_model_type_for_library.get(model_type, {}) mapping_backend = mapping.get(backend, {}) for task in supported_tasks: normalized_task = task.replace('-with-past', '') if normalized_task not in cls.get_all_tasks(): known_tasks = ', '.join(cls.get_all_tasks()) raise ValueError(f'The TasksManager does not know the task called "{normalized_task}", known tasks: {known_tasks}.') if not overwrite_existing and task in mapping_backend: continue mapping_backend[task] = make_backend_config_constructor_for_task(config_cls, task) mapping[backend] = mapping_backend supported_model_type_for_library[model_type] = mapping return config_cls return decorator return wrapper @staticmethod def get_supported_tasks_for_model_type(model_type: str, exporter: str, model_name: Optional[str]=None, library_name: Optional[str]=None) -> TaskNameToExportConfigDict: if library_name is None: logger.warning('Not passing the argument `library_name` to `get_supported_tasks_for_model_type` is deprecated and the support will 
be removed in a future version of Optimum. Please specify a `library_name`. Defaulting to `"transformers`.') supported_model_type_for_library = {**TasksManager._DIFFUSERS_SUPPORTED_MODEL_TYPE, **TasksManager._TIMM_SUPPORTED_MODEL_TYPE, **TasksManager._SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE, **TasksManager._SUPPORTED_MODEL_TYPE} library_name = 'transformers' else: supported_model_type_for_library = TasksManager._LIBRARY_TO_SUPPORTED_MODEL_TYPES[library_name] model_type = model_type.lower().replace('_', '-') model_type_and_model_name = f'{model_type} ({model_name})' if model_name else model_type default_model_type = None if library_name in TasksManager._MODEL_TYPE_FOR_DEFAULT_CONFIG: default_model_type = TasksManager._MODEL_TYPE_FOR_DEFAULT_CONFIG[library_name] if model_type not in supported_model_type_for_library: if default_model_type is not None: model_type = default_model_type else: raise KeyError(f'{model_type_and_model_name} is not supported yet for {library_name}. Only {list(supported_model_type_for_library.keys())} are supported for the library {library_name}. If you want to support {model_type} please propose a PR or open up an issue.') if exporter not in supported_model_type_for_library[model_type]: raise KeyError(f'{model_type_and_model_name} is not supported yet with the {exporter} backend. Only {list(supported_model_type_for_library[model_type].keys())} are supported. If you want to support {exporter} please propose a PR or open up an issue.') return supported_model_type_for_library[model_type][exporter] @staticmethod def get_supported_model_type_for_task(task: str, exporter: str) -> List[str]: return [model_type.replace('-', '_') for model_type in TasksManager._SUPPORTED_MODEL_TYPE if task in TasksManager._SUPPORTED_MODEL_TYPE[model_type][exporter]] @staticmethod def synonyms_for_task(task: str) -> Set[str]: synonyms = [k for (k, v) in TasksManager._SYNONYM_TASK_MAP.items() if v == task] synonyms += [k for (k, v) in TasksManager._SYNONYM_TASK_MAP.items() if v == TasksManager.map_from_synonym(task)] synonyms = set(synonyms) try: synonyms.remove(task) except KeyError: pass return synonyms @staticmethod def map_from_synonym(task: str) -> str: if task in TasksManager._SYNONYM_TASK_MAP: task = TasksManager._SYNONYM_TASK_MAP[task] return task @staticmethod def _validate_framework_choice(framework: str): if framework not in ['pt', 'tf']: raise ValueError(f'Only two frameworks are supported for export: pt or tf, but {framework} was provided.') elif framework == 'pt' and (not is_torch_available()): raise RuntimeError('Cannot export model using PyTorch because no PyTorch package was found.') elif framework == 'tf' and (not is_tf_available()): raise RuntimeError('Cannot export model using TensorFlow because no TensorFlow package was found.') @staticmethod def get_model_class_for_task(task: str, framework: str='pt', model_type: Optional[str]=None, model_class_name: Optional[str]=None, library: str='transformers') -> Type: task = task.replace('-with-past', '') task = TasksManager.map_from_synonym(task) TasksManager._validate_framework_choice(framework) if (framework, model_type, task) in TasksManager._CUSTOM_CLASSES: (library, class_name) = TasksManager._CUSTOM_CLASSES[framework, model_type, task] loaded_library = importlib.import_module(library) return getattr(loaded_library, class_name) else: if framework == 'pt': tasks_to_model_loader = TasksManager._LIBRARY_TO_TASKS_TO_MODEL_LOADER_MAP[library] else: tasks_to_model_loader = 
TasksManager._LIBRARY_TO_TF_TASKS_TO_MODEL_LOADER_MAP[library] loaded_library = importlib.import_module(library) if model_class_name is None: if task not in tasks_to_model_loader: raise KeyError(f'Unknown task: {task}. Possible values are: ' + ', '.join([f'`{key}` for {tasks_to_model_loader[key]}' for key in tasks_to_model_loader])) if isinstance(tasks_to_model_loader[task], str): model_class_name = tasks_to_model_loader[task] elif library == 'transformers': if model_type is None: logger.warning(f'No model type passed for the task {task}, that may be mapped to several loading classes ({tasks_to_model_loader[task]}). Defaulting to {tasks_to_model_loader[task][0]} to load the model.') model_class_name = tasks_to_model_loader[task][0] else: for autoclass_name in tasks_to_model_loader[task]: module = getattr(loaded_library, autoclass_name) if model_type in module._model_mapping._model_mapping or model_type.replace('-', '_') in module._model_mapping._model_mapping: model_class_name = autoclass_name break if model_class_name is None: raise ValueError(f'Unrecognized configuration classes {tasks_to_model_loader[task]} do not match with the model type {model_type} and task {task}.') else: raise NotImplementedError('For library other than transformers, the _TASKS_TO_MODEL_LOADER mapping should be one to one.') return getattr(loaded_library, model_class_name) @staticmethod def get_model_files(model_name_or_path: Union[str, Path], subfolder: str='', cache_dir: str=HUGGINGFACE_HUB_CACHE, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None): if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token request_exception = None full_model_path = Path(model_name_or_path, subfolder) if full_model_path.is_dir(): all_files = [os.path.relpath(os.path.join(dirpath, file), full_model_path) for (dirpath, _, filenames) in os.walk(full_model_path) for file in filenames] else: try: if not isinstance(model_name_or_path, str): model_name_or_path = str(model_name_or_path) all_files = huggingface_hub.list_repo_files(model_name_or_path, repo_type='model', token=token, revision=revision) if subfolder != '': all_files = [file[len(subfolder) + 1:] for file in all_files if file.startswith(subfolder)] except (RequestsConnectionError, OfflineModeIsEnabled) as e: snapshot_path = huggingface_hub.snapshot_download(repo_id=model_name_or_path, revision=revision, cache_dir=cache_dir, token=token) full_model_path = Path(snapshot_path, subfolder) if full_model_path.is_dir(): all_files = [os.path.relpath(os.path.join(dirpath, file), full_model_path) for (dirpath, _, filenames) in os.walk(full_model_path) for file in filenames] else: request_exception = e return (all_files, request_exception) @staticmethod def determine_framework(model_name_or_path: Union[str, Path], subfolder: str='', revision: Optional[str]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, token: Optional[Union[bool, str]]=None) -> str: (all_files, request_exception) = TasksManager.get_model_files(model_name_or_path, subfolder=subfolder, cache_dir=cache_dir, token=token, revision=revision) pt_weight_name = Path(WEIGHTS_NAME).stem pt_weight_extension = Path(WEIGHTS_NAME).suffix safe_weight_name = Path(SAFE_WEIGHTS_NAME).stem safe_weight_extension = 
Path(SAFE_WEIGHTS_NAME).suffix is_pt_weight_file = [file.startswith(pt_weight_name) and file.endswith(pt_weight_extension) or (file.startswith(safe_weight_name) and file.endswith(safe_weight_extension)) for file in all_files] weight_name = Path(TF2_WEIGHTS_NAME).stem weight_extension = Path(TF2_WEIGHTS_NAME).suffix is_tf_weight_file = [file.startswith(weight_name) and file.endswith(weight_extension) for file in all_files] if any(is_pt_weight_file): framework = 'pt' elif any(is_tf_weight_file): framework = 'tf' elif 'model_index.json' in all_files and any((file.endswith((pt_weight_extension, safe_weight_extension)) for file in all_files)): framework = 'pt' elif 'config_sentence_transformers.json' in all_files: framework = 'pt' elif request_exception is not None: raise RequestsConnectionError(f'The framework could not be automatically inferred. If using the command-line, please provide the argument --framework (pt,tf) Detailed error: {request_exception}') else: raise FileNotFoundError(f'Cannot determine framework from given checkpoint location. There should be a {Path(WEIGHTS_NAME).stem}*{Path(WEIGHTS_NAME).suffix} for PyTorch or {Path(TF2_WEIGHTS_NAME).stem}*{Path(TF2_WEIGHTS_NAME).suffix} for TensorFlow.') if is_torch_available(): framework = framework or 'pt' elif is_tf_available(): framework = framework or 'tf' else: raise EnvironmentError('Neither PyTorch nor TensorFlow found in environment. Cannot export model.') logger.info(f'Framework not specified. Using {framework} to export the model.') return framework @classmethod def _infer_task_from_model_or_model_class(cls, model: Optional[Union['PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline']]=None, model_class: Optional[Type[Union['PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline']]]=None) -> str: if model is not None and model_class is not None: raise ValueError('Either a model or a model class must be provided, but both were given here.') if model is None and model_class is None: raise ValueError('Either a model or a model class must be provided, but none were given here.') target_class_name = model.__class__.__name__ if model is not None else model_class.__name__ target_class_module = model.__class__.__module__ if model is not None else model_class.__module__ tasks_to_model_loaders = None if target_class_name.startswith('AutoModel'): tasks_to_model_loaders = cls._TRANSFORMERS_TASKS_TO_MODEL_LOADERS elif target_class_name.startswith('TFAutoModel'): tasks_to_model_loaders = cls._TRANSFORMERS_TASKS_TO_TF_MODEL_LOADERS elif target_class_name.startswith('AutoPipeline'): tasks_to_model_loaders = cls._DIFFUSERS_TASKS_TO_MODEL_LOADERS if tasks_to_model_loaders is not None: for (task_name, model_loaders) in tasks_to_model_loaders.items(): if isinstance(model_loaders, str): model_loaders = (model_loaders,) for model_loader_class_name in model_loaders: if target_class_name == model_loader_class_name: return task_name tasks_to_model_mapping = None if target_class_module.startswith('transformers'): if target_class_name.startswith('TF'): tasks_to_model_mapping = cls._TRANSFORMERS_TASKS_TO_TF_MODEL_MAPPINGS else: tasks_to_model_mapping = cls._TRANSFORMERS_TASKS_TO_MODEL_MAPPINGS elif target_class_module.startswith('diffusers'): tasks_to_model_mapping = cls._DIFFUSERS_TASKS_TO_MODEL_MAPPINGS if tasks_to_model_mapping is not None: for (task_name, model_mapping) in tasks_to_model_mapping.items(): for (model_type, model_class_name) in model_mapping.items(): if target_class_name == model_class_name: return task_name raise 
ValueError('The task name could not be automatically inferred. If using the command-line, please provide the argument --task task-name. Example: `--task text-classification`.') @classmethod def _infer_task_from_model_name_or_path(cls, model_name_or_path: str, subfolder: str='', revision: Optional[str]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, token: Optional[Union[bool, str]]=None) -> str: inferred_task_name = None is_local = os.path.isdir(os.path.join(model_name_or_path, subfolder)) if is_local: raise RuntimeError(f"Cannot infer the task from a local directory yet, please specify the task manually ({', '.join(TasksManager.get_all_tasks())}).") else: if subfolder != '': raise RuntimeError('Cannot infer the task from a model repo with a subfolder yet, please specify the task manually.') try: model_info = huggingface_hub.model_info(model_name_or_path, revision=revision, token=token) except (RequestsConnectionError, OfflineModeIsEnabled): raise RuntimeError(f"Hugging Face Hub is not reachable and we cannot infer the task from a cached model. Make sure you are not offline, or otherwise please specify the `task` (or `--task` in command-line) argument ({', '.join(TasksManager.get_all_tasks())}).") library_name = cls.infer_library_from_model(model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token) if library_name == 'timm': inferred_task_name = 'image-classification' elif library_name == 'diffusers': pipeline_tag = model_info.pipeline_tag model_config = model_info.config if pipeline_tag is not None: inferred_task_name = cls.map_from_synonym(pipeline_tag) elif model_config is not None: if model_config.get('diffusers', None) is not None: diffusers_class_name = model_config['diffusers']['_class_name'] for (task_name, model_mapping) in cls._DIFFUSERS_TASKS_TO_MODEL_MAPPINGS.items(): for (model_type, model_class_name) in model_mapping.items(): if diffusers_class_name == model_class_name: inferred_task_name = task_name break if inferred_task_name is not None: break elif library_name == 'transformers': pipeline_tag = model_info.pipeline_tag transformers_info = model_info.transformersInfo if pipeline_tag is not None: inferred_task_name = cls.map_from_synonym(model_info.pipeline_tag) elif transformers_info is not None: transformers_pipeline_tag = transformers_info.get('pipeline_tag', None) transformers_auto_model = transformers_info.get('auto_model', None) if transformers_pipeline_tag is not None: pipeline_tag = transformers_info['pipeline_tag'] inferred_task_name = cls.map_from_synonym(pipeline_tag) elif transformers_auto_model is not None: transformers_auto_model = transformers_auto_model.replace('TF', '') for (task_name, model_loaders) in cls._TRANSFORMERS_TASKS_TO_MODEL_LOADERS.items(): if isinstance(model_loaders, str): model_loaders = (model_loaders,) for model_loader_class_name in model_loaders: if transformers_auto_model == model_loader_class_name: inferred_task_name = task_name break if inferred_task_name is not None: break if inferred_task_name is None: raise KeyError(f'Could not find the proper task name for the model {model_name_or_path}.') return inferred_task_name
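# Editor's note: an illustrative sketch, not part of the original tasks.py. It shows how the public
# `infer_task_from_model` wrapper defined just below resolves a task for a Hub repo id through
# `_infer_task_from_model_name_or_path`. The checkpoint id is an assumption and Hub access is required.
from optimum.exporters.tasks import TasksManager

task = TasksManager.infer_task_from_model("distilbert-base-uncased-finetuned-sst-2-english")
print(task)  # expected to resolve to 'text-classification' via the Hub pipeline tag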
@classmethod def infer_task_from_model(cls, model: Union[str, 'PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline', Type], subfolder: str='', revision: Optional[str]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, token: Optional[Union[bool, str]]=None) -> str: inferred_task_name = None if isinstance(model, str): inferred_task_name = cls._infer_task_from_model_name_or_path(model_name_or_path=model, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token) elif type(model) == type: inferred_task_name = cls._infer_task_from_model_or_model_class(model_class=model) else: inferred_task_name = cls._infer_task_from_model_or_model_class(model=model) if inferred_task_name is None: raise ValueError('The task name could not be automatically inferred. If using the command-line, please provide the argument --task task-name. Example: `--task text-classification`.') return inferred_task_name @classmethod def _infer_library_from_model_or_model_class(cls, model: Optional[Union['PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline']]=None, model_class: Optional[Type[Union['PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline']]]=None): inferred_library_name = None if model is not None and model_class is not None: raise ValueError('Either a model or a model class must be provided, but both were given here.') if model is None and model_class is None: raise ValueError('Either a model or a model class must be provided, but none were given here.') target_class_module = model.__class__.__module__ if model is not None else model_class.__module__ if target_class_module.startswith('sentence_transformers'): inferred_library_name = 'sentence_transformers' elif target_class_module.startswith('transformers'): inferred_library_name = 'transformers' elif target_class_module.startswith('diffusers'): inferred_library_name = 'diffusers' elif target_class_module.startswith('timm'): inferred_library_name = 'timm' if inferred_library_name is None: raise ValueError('The library name could not be automatically inferred. If using the command-line, please provide the argument --library {transformers,diffusers,timm,sentence_transformers}. Example: `--library diffusers`.') return inferred_library_name @classmethod def _infer_library_from_model_name_or_path(cls, model_name_or_path: Union[str, Path], subfolder: str='', revision: Optional[str]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, token: Optional[Union[bool, str]]=None): inferred_library_name = None (all_files, _) = TasksManager.get_model_files(model_name_or_path, subfolder=subfolder, cache_dir=cache_dir, revision=revision, token=token) if 'model_index.json' in all_files: inferred_library_name = 'diffusers' elif any((file_path.startswith('sentence_') for file_path in all_files)) or 'config_sentence_transformers.json' in all_files: inferred_library_name = 'sentence_transformers' elif 'config.json' in all_files: kwargs = {'subfolder': subfolder, 'revision': revision, 'cache_dir': cache_dir, 'token': token} (config_dict, kwargs) = PretrainedConfig.get_config_dict(model_name_or_path, **kwargs) model_config = PretrainedConfig.from_dict(config_dict, **kwargs) if hasattr(model_config, 'pretrained_cfg') or hasattr(model_config, 'architecture'): inferred_library_name = 'timm' elif hasattr(model_config, '_diffusers_version'): inferred_library_name = 'diffusers' else: inferred_library_name = 'transformers' if inferred_library_name is None: raise ValueError('The library name could not be automatically inferred. If using the command-line, please provide the argument --library {transformers,diffusers,timm,sentence_transformers}. 
Example: `--library diffusers`.') return inferred_library_name @classmethod def infer_library_from_model(cls, model: Union[str, 'PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline', Type], subfolder: str='', revision: Optional[str]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, token: Optional[Union[bool, str]]=None): if isinstance(model, str): library_name = cls._infer_library_from_model_name_or_path(model_name_or_path=model, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token) elif type(model) == type: library_name = cls._infer_library_from_model_or_model_class(model_class=model) else: library_name = cls._infer_library_from_model_or_model_class(model=model) return library_name @classmethod def standardize_model_attributes(cls, model: Union['PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline']): library_name = TasksManager.infer_library_from_model(model) if library_name == 'diffusers': inferred_model_type = None for (task_name, model_mapping) in cls._DIFFUSERS_TASKS_TO_MODEL_MAPPINGS.items(): for (model_type, model_class_name) in model_mapping.items(): if model.__class__.__name__ == model_class_name: inferred_model_type = model_type break if inferred_model_type is not None: break model.config.export_model_type = inferred_model_type elif library_name == 'timm': model.config = PretrainedConfig.from_dict(model.pretrained_cfg) model.config.export_model_type = model.pretrained_cfg['architecture'] elif library_name == 'sentence_transformers': if 'Transformer' in model[0].__class__.__name__: model.config = model[0].auto_model.config model.config.export_model_type = 'transformer' elif 'CLIP' in model[0].__class__.__name__: model.config = model[0].model.config model.config.export_model_type = 'clip' else: raise ValueError(f'The export of a sentence_transformers model with the first module being {model[0].__class__.__name__} is currently not supported in Optimum. 
Please open an issue or submit a PR to add the support.') @staticmethod def get_all_tasks(): tasks = [] if is_torch_available(): mapping = TasksManager._LIBRARY_TO_TASKS_TO_MODEL_LOADER_MAP else: mapping = TasksManager._LIBRARY_TO_TF_TASKS_TO_MODEL_LOADER_MAP tasks = [] for d in mapping.values(): tasks += list(d.keys()) tasks = list(set(tasks)) return tasks @staticmethod def get_model_from_task(task: str, model_name_or_path: Union[str, Path], subfolder: str='', revision: Optional[str]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, token: Optional[Union[bool, str]]=None, framework: Optional[str]=None, torch_dtype: Optional['torch.dtype']=None, device: Optional[Union['torch.device', str]]=None, library_name: Optional[str]=None, **model_kwargs) -> Union['PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline']: if framework is None: framework = TasksManager.determine_framework(model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token) if library_name is None: library_name = TasksManager.infer_library_from_model(model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token) original_task = task if task == 'auto': task = TasksManager.infer_task_from_model(model_name_or_path, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token) model_type = None model_class_name = None kwargs = {'subfolder': subfolder, 'revision': revision, 'cache_dir': cache_dir, **model_kwargs} if library_name == 'transformers': config = AutoConfig.from_pretrained(model_name_or_path, **kwargs) model_type = config.model_type.replace('_', '-') if original_task == 'automatic-speech-recognition' or task == 'automatic-speech-recognition': if original_task == 'auto' and config.architectures is not None: model_class_name = config.architectures[0] if library_name == 'diffusers': config = DiffusionPipeline.load_config(model_name_or_path, **kwargs) class_name = config.get('_class_name', None) loaded_library = importlib.import_module(library_name) model_class = getattr(loaded_library, class_name) else: model_class = TasksManager.get_model_class_for_task(task, framework, model_type=model_type, model_class_name=model_class_name, library=library_name) if library_name == 'timm': model = model_class(f'hf_hub:{model_name_or_path}', pretrained=True, exportable=True) model = model.to(torch_dtype).to(device) elif library_name == 'sentence_transformers': cache_folder = model_kwargs.pop('cache_folder', None) use_auth_token = model_kwargs.pop('use_auth_token', None) token = model_kwargs.pop('token', None) trust_remote_code = model_kwargs.pop('trust_remote_code', False) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token model = model_class(model_name_or_path, device=device, cache_folder=cache_folder, token=token, trust_remote_code=trust_remote_code) else: try: if framework == 'pt': kwargs['torch_dtype'] = torch_dtype if isinstance(device, str): device = torch.device(device) elif device is None: device = torch.device('cpu') if version.parse(torch.__version__) >= version.parse('2.0') and library_name != 'diffusers': with device: model = model_class.from_pretrained(model_name_or_path, **kwargs) else: model = model_class.from_pretrained(model_name_or_path, **kwargs).to(device) else: model = model_class.from_pretrained(model_name_or_path, **kwargs) except OSError: if framework == 'pt': logger.info('Loading TensorFlow model in PyTorch before exporting.') kwargs['from_tf'] = True model = model_class.from_pretrained(model_name_or_path, **kwargs) else: logger.info('Loading PyTorch model in TensorFlow before exporting.') kwargs['from_pt'] = True model = model_class.from_pretrained(model_name_or_path, **kwargs) TasksManager.standardize_model_attributes(model) return model @staticmethod def get_exporter_config_constructor(exporter: str, model: Optional[Union['PreTrainedModel', 'TFPreTrainedModel']]=None, task: str='feature-extraction', model_type: Optional[str]=None, model_name: Optional[str]=None, exporter_config_kwargs: Optional[Dict[str, Any]]=None, library_name: Optional[str]=None) -> ExportConfigConstructor: if library_name is None: logger.warning('Passing the argument `library_name` to `get_exporter_config_constructor` is required, but got library_name=None. Defaulting to `transformers`. An error will be raised in a future version of Optimum if `library_name` is not provided.') supported_model_type_for_library = {**TasksManager._DIFFUSERS_SUPPORTED_MODEL_TYPE, **TasksManager._TIMM_SUPPORTED_MODEL_TYPE, **TasksManager._SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE, **TasksManager._SUPPORTED_MODEL_TYPE} library_name = 'transformers' else: supported_model_type_for_library = TasksManager._LIBRARY_TO_SUPPORTED_MODEL_TYPES[library_name] if model is None and model_type is None: raise ValueError('Either a model_type or model should be provided to retrieve the export config.') if model_type is None: if hasattr(model.config, 'export_model_type'): model_type = model.config.export_model_type else: model_type = getattr(model.config, 'model_type', None) if model_type is None: raise ValueError('Model type cannot be inferred. Please provide the model_type for the model!') model_type = model_type.replace('_', '-') model_name = getattr(model, 'name', model_name) model_tasks = TasksManager.get_supported_tasks_for_model_type(model_type, exporter, model_name=model_name, library_name=library_name) if task not in model_tasks: synonyms = TasksManager.synonyms_for_task(task) for synonym in synonyms: if synonym in model_tasks: task = synonym break if task not in model_tasks: raise ValueError(f"{model_type} doesn't support task {task} for the {exporter} backend. 
Supported tasks are: {', '.join(model_tasks.keys())}.") if model_type not in supported_model_type_for_library: model_type = TasksManager._MODEL_TYPE_FOR_DEFAULT_CONFIG[library_name] exporter_config_constructor = supported_model_type_for_library[model_type][exporter][task] if exporter_config_kwargs is not None: exporter_config_constructor = partial(exporter_config_constructor, **exporter_config_kwargs) return exporter_config_constructor # File: optimum-main/optimum/exporters/tflite/__init__.py from typing import TYPE_CHECKING from transformers.utils import _LazyModule _import_structure = {'base': ['QuantizationApproach', 'TFLiteQuantizationConfig', 'TFLiteConfig'], 'convert': ['export', 'validate_model_outputs']} if TYPE_CHECKING: from .base import QuantizationApproach, TFLiteQuantizationConfig, TFLiteConfig from .convert import export, validate_model_outputs else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: optimum-main/optimum/exporters/tflite/__main__.py """""" from argparse import ArgumentParser from requests.exceptions import ConnectionError as RequestsConnectionError from ...commands.export.tflite import parse_args_tflite from ...utils import logging from ...utils.save_utils import maybe_load_preprocessors, maybe_save_preprocessors from ..error_utils import AtolError, OutputMatchError, ShapeError from ..tasks import TasksManager from .base import TFLiteQuantizationConfig from .convert import export, validate_model_outputs logger = logging.get_logger() logger.setLevel(logging.INFO) def main(): parser = ArgumentParser('Hugging Face Optimum TensorFlow Lite exporter') parse_args_tflite(parser) args = parser.parse_args() args.output = args.output.joinpath('model.tflite') if not args.output.parent.exists(): args.output.parent.mkdir(parents=True) task = args.task if task == 'auto': try: task = TasksManager.infer_task_from_model(args.model) except KeyError as e: raise KeyError(f"The task could not be automatically inferred. Please provide the argument --task with the task from {', '.join(TasksManager.get_all_tasks())}. Detailed error: {e}") except RequestsConnectionError as e: raise RequestsConnectionError(f"The task could not be automatically inferred as this is available only for models hosted on the Hugging Face Hub. Please provide the argument --task with the relevant task from {', '.join(TasksManager.get_all_tasks())}. 
Detailed error: {e}") model = TasksManager.get_model_from_task(task, args.model, framework='tf', cache_dir=args.cache_dir, trust_remote_code=args.trust_remote_code) tflite_config_constructor = TasksManager.get_exporter_config_constructor(model=model, exporter='tflite', task=task, library_name='transformers') shapes = {name: getattr(args, name) for name in tflite_config_constructor.func.get_mandatory_axes_for_task(task)} tflite_config = tflite_config_constructor(model.config, **shapes) if args.atol is None: args.atol = tflite_config.ATOL_FOR_VALIDATION if isinstance(args.atol, dict): args.atol = args.atol[task.replace('-with-past', '')] model.config.save_pretrained(args.output.parent) maybe_save_preprocessors(args.model, args.output.parent) preprocessor = maybe_load_preprocessors(args.output.parent) if preprocessor: preprocessor = preprocessor[0] else: preprocessor = None quantization_config = None if args.quantize: quantization_config = TFLiteQuantizationConfig(approach=args.quantize, fallback_to_float=args.fallback_to_float, inputs_dtype=args.inputs_type, outputs_dtype=args.outputs_type, calibration_dataset_name_or_path=args.calibration_dataset, calibration_dataset_config_name=args.calibration_dataset_config_name, num_calibration_samples=args.num_calibration_samples, calibration_split=args.calibration_split, primary_key=args.primary_key, secondary_key=args.secondary_key, question_key=args.question_key, context_key=args.context_key, image_key=args.image_key) (tflite_inputs, tflite_outputs) = export(model=model, config=tflite_config, output=args.output, task=task, preprocessor=preprocessor, quantization_config=quantization_config) if args.quantize is None: try: validate_model_outputs(config=tflite_config, reference_model=model, tflite_model_path=args.output, tflite_named_outputs=tflite_config.outputs, atol=args.atol) logger.info(f'The TensorFlow Lite export succeeded and the exported model was saved at: {args.output.parent.as_posix()}') except ShapeError as e: raise e except AtolError as e: logger.warning(f'The TensorFlow Lite export succeeded with the warning: {e}.\n The exported model was saved at: {args.output.parent.as_posix()}') except OutputMatchError as e: logger.warning(f'The TensorFlow Lite export succeeded with the warning: {e}.\n The exported model was saved at: {args.output.parent.as_posix()}') except Exception as e: logger.error(f'An error occured with the error message: {e}.\n The exported model was saved at: {args.output.parent.as_posix()}') if __name__ == '__main__': main() # File: optimum-main/optimum/exporters/tflite/base.py """""" from abc import ABC, abstractmethod from ctypes import ArgumentError from dataclasses import dataclass from enum import Enum from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union from transformers.utils import is_tf_available if is_tf_available(): import tensorflow as tf from ..base import ExportConfig if TYPE_CHECKING: from transformers import PretrainedConfig, TFPreTrainedModel from ...utils import DummyInputGenerator if is_tf_available(): from tensorflow import TensorSpec class MissingMandatoryAxisDimension(ValueError): pass class QuantizationApproachNotSupported(ValueError): pass class QuantizationApproach(str, Enum): INT8_DYNAMIC = 'int8-dynamic' INT8 = 'int8' INT8x16 = 'int8x16' FP16 = 'fp16' @dataclass class TFLiteQuantizationConfig: approach: Optional[Union[str, QuantizationApproach]] = None fallback_to_float: bool = False inputs_dtype: Optional[str] = None outputs_dtype: Optional[str] = 
# File: optimum-main/optimum/exporters/tflite/base.py """""" from abc import ABC, abstractmethod from ctypes import ArgumentError from dataclasses import dataclass from enum import Enum from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union from transformers.utils import is_tf_available if is_tf_available(): import tensorflow as tf from ..base import ExportConfig if TYPE_CHECKING: from transformers import PretrainedConfig, TFPreTrainedModel from ...utils import DummyInputGenerator if is_tf_available(): from tensorflow import TensorSpec class MissingMandatoryAxisDimension(ValueError): pass class QuantizationApproachNotSupported(ValueError): pass class QuantizationApproach(str, Enum): INT8_DYNAMIC = 'int8-dynamic' INT8 = 'int8' INT8x16 = 'int8x16' FP16 = 'fp16' @dataclass class TFLiteQuantizationConfig: approach: Optional[Union[str, QuantizationApproach]] = None fallback_to_float: bool = False inputs_dtype: Optional[str] = None outputs_dtype: Optional[str] = None calibration_dataset_name_or_path: Optional[Union[str, Path]] = None calibration_dataset_config_name: Optional[str] = None num_calibration_samples: int = 200 calibration_split: Optional[str] = None primary_key: Optional[str] = None secondary_key: Optional[str] = None question_key: Optional[str] = None context_key: Optional[str] = None image_key: Optional[str] = None def __post_init__(self): if self.approach is not None: if isinstance(self.approach, str) and (not isinstance(self.approach, QuantizationApproach)): self.approach = QuantizationApproach(self.approach) class TFLiteConfig(ExportConfig, ABC): NORMALIZED_CONFIG_CLASS: Type = None DUMMY_INPUT_GENERATOR_CLASSES: Tuple[Type, ...] = () ATOL_FOR_VALIDATION: Union[float, Dict[str, float]] = 1e-05 MANDATORY_AXES = () SUPPORTED_QUANTIZATION_APPROACHES: Union[Dict[str, Tuple[QuantizationApproach, ...]], Tuple[QuantizationApproach, ...]] = tuple((approach for approach in QuantizationApproach)) _TASK_TO_COMMON_OUTPUTS = {'text-generation': ['logits'], 'feature-extraction': ['last_hidden_state'], 'image-classification': ['logits'], 'image-segmentation': ['logits', 'pred_boxes', 'pred_masks'], 'masked-im': ['logits'], 'fill-mask': ['logits'], 'multiple-choice': ['logits'], 'object-detection': ['logits', 'pred_boxes'], 'question-answering': ['start_logits', 'end_logits'], 'semantic-segmentation': ['logits'], 'text2text-generation': ['logits', 'encoder_last_hidden_state'], 'text-classification': ['logits'], 'token-classification': ['logits'], 'automatic-speech-recognition': ['logits'], 'audio-classification': ['logits'], 'audio-frame-classification': ['logits'], 'audio-xvector': ['logits']} def __init__(self, config: 'PretrainedConfig', task: str, batch_size: int=1, sequence_length: Optional[int]=None, num_choices: Optional[int]=None, width: Optional[int]=None, height: Optional[int]=None, num_channels: Optional[int]=None, feature_size: Optional[int]=None, nb_max_frames: Optional[int]=None, audio_sequence_length: Optional[int]=None, point_batch_size: Optional[int]=None, nb_points_per_image: Optional[int]=None): self._config = config self._normalized_config = self.NORMALIZED_CONFIG_CLASS(self._config) self.mandatory_axes = () self.task = task self._axes: Dict[str, int] = {} axes_values = {'batch_size': batch_size, 'sequence_length': sequence_length, 'num_choices': num_choices, 'width': width, 'height': height, 'num_channels': num_channels, 'feature_size': feature_size, 'nb_max_frames': nb_max_frames, 'audio_sequence_length': audio_sequence_length, 'point_batch_size': point_batch_size, 'nb_points_per_image': nb_points_per_image} for (name, value) in axes_values.items(): setattr(self, name, value) @classmethod def get_mandatory_axes_for_task(cls, task: str) -> Tuple[str]: axes = [] for axis in cls.MANDATORY_AXES: if isinstance(axis, tuple): (tasks, name) = axis if not isinstance(tasks, tuple): tasks = (tasks,) if task not in tasks: continue else: name = axis axes.append(name) return tuple(axes) @property def task(self) -> str: return self._task @task.setter def task(self, value: str): self._task = value self.mandatory_axes = self.get_mandatory_axes_for_task(self.task) def __getattr__(self, attr_name) -> Any: if attr_name != '_axes' and attr_name in self._axes: return self._axes[attr_name] else: raise AttributeError(attr_name) def __setattr__(self, name: str, value: Any) -> None: mandatory_axes = getattr(self, 'mandatory_axes', []) if name in mandatory_axes: if value is None: if self._normalized_config.has_attribute(name): value = 
getattr(self._normalized_config, name) self._axes[name] = value else: return super().__setattr__(name, value) def _validate_mandatory_axes(self): for (name, axis_dim) in self._axes.items(): if axis_dim is None: raise MissingMandatoryAxisDimension(f'The value for the {name} axis is missing, it is needed to perform the export to TensorFlow Lite.') def _create_dummy_input_generator_classes(self) -> List['DummyInputGenerator']: self._validate_mandatory_axes() return [cls_(self.task, self._normalized_config, **self._axes) for cls_ in self.DUMMY_INPUT_GENERATOR_CLASSES] @property def values_override(self) -> Optional[Dict[str, Any]]: if hasattr(self._config, 'use_cache'): return {'use_cache': False} return None @property @abstractmethod def inputs(self) -> List[str]: raise NotImplementedError() @property def outputs(self) -> List[str]: return self._TASK_TO_COMMON_OUTPUTS[self.task] def generate_dummy_inputs(self) -> Dict[str, 'tf.Tensor']: dummy_inputs_generators = self._create_dummy_input_generator_classes() dummy_inputs = {} for input_name in self.inputs: input_was_inserted = False for dummy_input_gen in dummy_inputs_generators: if dummy_input_gen.supports_input(input_name): dummy_inputs[input_name] = dummy_input_gen.generate(input_name, framework='tf') input_was_inserted = True break if not input_was_inserted: raise RuntimeError(f'Could not generate dummy inputs for "{input_name}". Try adding a proper dummy input generator to the model TFLite config.') return dummy_inputs @property def inputs_specs(self) -> List['TensorSpec']: dummy_inputs = self.generate_dummy_inputs() return [tf.TensorSpec(dummy_input.shape, dtype=dummy_input.dtype, name=input_name) for (input_name, dummy_input) in dummy_inputs.items()] def model_to_signatures(self, model: 'TFPreTrainedModel', **model_kwargs: Any) -> Dict[str, 'tf.types.experimental.ConcreteFunction']: input_names = self.inputs output_names = self.outputs def forward(*args): if len(args) != len(input_names): raise ArgumentError(f"The number of inputs provided ({len(args)} do not match the number of expected inputs: {', '.join(input_names)}.") kwargs = dict(zip(input_names, args)) outputs = model.call(**kwargs, **model_kwargs) return {key: value for (key, value) in outputs.items() if key in output_names} function = tf.function(forward, input_signature=self.inputs_specs).get_concrete_function() return {'model': function} def supports_quantization_approach(self, quantization_approach: QuantizationApproach) -> bool: supported_approaches = self.SUPPORTED_QUANTIZATION_APPROACHES if isinstance(supported_approaches, dict): supported_approaches = supported_approaches.get(self.task, supported_approaches['default']) return quantization_approach in supported_approaches # File: optimum-main/optimum/exporters/tflite/config.py """""" from ...utils import DummyTextInputGenerator, DummyVisionInputGenerator, logging from .base import TFLiteConfig logger = logging.get_logger(__name__) class TextEncoderTFliteConfig(TFLiteConfig): DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator,) MANDATORY_AXES = ('batch_size', 'sequence_length', ('multiple-choice', 'num_choices')) class VisionTFLiteConfig(TFLiteConfig): DUMMY_INPUT_GENERATOR_CLASSES = (DummyVisionInputGenerator,) MANDATORY_AXES = ('batch_size', 'num_channels', 'width', 'height') # File: optimum-main/optimum/exporters/tflite/convert.py """""" from pathlib import Path from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple import numpy as np from transformers 
import PreTrainedTokenizerBase from transformers.utils import is_tf_available from ...utils import logging from ...utils.preprocessing import Preprocessor, TaskProcessorsManager from ..error_utils import AtolError, OutputMatchError, ShapeError from .base import QuantizationApproach, QuantizationApproachNotSupported if TYPE_CHECKING: from datasets import Dataset if is_tf_available(): import tensorflow as tf from transformers import TFPreTrainedModel from .base import TFLiteConfig, TFLiteQuantizationConfig logger = logging.get_logger(__name__) def validate_model_outputs(config: 'TFLiteConfig', reference_model: 'TFPreTrainedModel', tflite_model_path: Path, tflite_named_outputs: List[str], atol: Optional[float]=None): if not is_tf_available(): raise ImportError('Cannot validate conversion because TensorFlow is not installed. Please install TensorFlow first.') import tensorflow as tf logger.info('Validating TFLite model...') if atol is None: if isinstance(config.ATOL_FOR_VALIDATION, dict): atol = config.ATOL_FOR_VALIDATION[config.task] else: atol = config.ATOL_FOR_VALIDATION inputs = config.generate_dummy_inputs() ref_outputs = reference_model(**inputs) interpreter = tf.lite.Interpreter(model_path=tflite_model_path.as_posix()) tflite_model_runner = interpreter.get_signature_runner('model') tflite_outputs = tflite_model_runner(**inputs) (ref_outputs_set, tflite_output_set) = (set(ref_outputs.keys()), set(tflite_named_outputs)) if not tflite_output_set.issubset(ref_outputs_set): raise OutputMatchError(f'TFLite model output names do not match reference model output names.\nReference model output names: {ref_outputs_set}\nTFLite model output names: {tflite_output_set}\nDifference: {tflite_output_set.difference(ref_outputs_set)}') else: tflite_output_names = ', '.join(tflite_output_set) logger.info(f'\t-[✓] TFLite model output names match reference model ({tflite_output_names})') shape_failures = [] value_failures = [] for (name, output) in tflite_outputs.items(): if name not in tflite_output_set: continue ref_output = ref_outputs[name].numpy() logger.info(f'\t- Validating TFLite Model output "{name}":') if not output.shape == ref_output.shape: logger.error(f"\t\t-[x] shape {output.shape} doesn't match {ref_output.shape}") shape_failures.append((name, ref_output.shape, output.shape)) else: logger.info(f'\t\t-[✓] {output.shape} matches {ref_output.shape}') if not np.allclose(ref_output, output, atol=atol): max_diff = np.amax(np.abs(ref_output - output)) logger.error(f'\t\t-[x] values not close enough, max diff: {max_diff} (atol: {atol})') value_failures.append((name, max_diff)) else: logger.info(f'\t\t-[✓] all values close (atol: {atol})') if shape_failures: msg = '\n'.join((f'- {t[0]}: got {t[1]} (reference) and {t[2]} (TFLite)' for t in shape_failures)) raise ShapeError(f'Output shapes do not match between reference model and the TFLite exported model:\n{msg}') if value_failures: msg = '\n'.join((f'- {t[0]}: max diff = {t[1]}' for t in value_failures)) raise AtolError(f'The maximum absolute difference between the output of the reference model and the TFLite exported model is not within the set tolerance {atol}:\n{msg}')
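# Editor's note: a small illustrative sketch, not part of convert.py, showing how an exported model.tflite
# can be run standalone through the same 'model' signature that validate_model_outputs uses above. The file
# path, input names, shapes and dtypes are assumptions that depend on the exported architecture and its
# TFLite config; adjust them to match the exported signature.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="bert_tflite/model.tflite")
runner = interpreter.get_signature_runner("model")
dummy = {"input_ids": np.ones((1, 128), dtype=np.int32), "attention_mask": np.ones((1, 128), dtype=np.int32), "token_type_ids": np.zeros((1, 128), dtype=np.int32)}
print({name: value.shape for (name, value) in runner(**dummy).items()})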
def create_representative_dataset(signatures, dataset: 'Dataset'): def representative_dataset(): for (sig_name, tf_function) in signatures.items(): inputs_to_keep = None for example in dataset: if inputs_to_keep is None: (args, kwargs) = tf_function.structured_input_signature args_to_keep = {input_.name for input_ in args if input_.name in example} kwargs_to_keep = {input_.name for input_ in kwargs.values() if input_.name in example} inputs_to_keep = args_to_keep | kwargs_to_keep yield (sig_name, {name: value for (name, value) in example.items() if name in inputs_to_keep}) return representative_dataset def prepare_converter_for_quantization(model: 'TFPreTrainedModel', config: 'TFLiteConfig', preprocessor: Optional[Preprocessor], signatures: Dict[str, Callable], quantization_config: 'TFLiteQuantizationConfig', converter: 'tf.lite.TFLiteConverter', task: Optional[str]=None): import tensorflow as tf if not config.supports_quantization_approach(quantization_config.approach) and (not quantization_config.fallback_to_float): raise QuantizationApproachNotSupported(f'{model.config.model_type} does not support full {quantization_config.approach} quantization, use fallback_to_float=True to fall back to the float implementation for the unsupported ops.') str_to_dtype = {'int8': tf.int8, 'uint8': tf.uint8} if quantization_config.approach in [QuantizationApproach.INT8, QuantizationApproach.INT8x16]: if preprocessor is None: raise ValueError('A preprocessor must be passed for INT8 and INT8x16 quantization since it is needed to preprocess the calibration dataset.') converter.optimizations = [tf.lite.Optimize.DEFAULT] if task is None: from ...exporters import TasksManager task = TasksManager.infer_task_from_model(model) preprocessor_kwargs = {} if isinstance(preprocessor, PreTrainedTokenizerBase): preprocessor_kwargs['max_length'] = config.sequence_length task_processor = TaskProcessorsManager.get_task_processor_class_for_task(task)(model.config, preprocessor, preprocessor_kwargs) if task == 'token-classification' and model.config.model_type in {'bloom', 'camembert', 'deberta', 'gpt2', 'roberta'}: preprocessor.add_prefix_space = True load_smallest_split = quantization_config.calibration_split is None if load_smallest_split: logger.warning('Since no calibration split was provided for the calibration dataset, the smallest split will be used if the dataset contains multiple splits.') batch_size = config.batch_size num_calibration_samples = quantization_config.num_calibration_samples if num_calibration_samples % batch_size != 0: new_num_calibration_samples = (num_calibration_samples // batch_size + 1) * batch_size logger.info(f'The number of calibration examples ({num_calibration_samples}) is not a multiple of the batch size ({batch_size}), using {new_num_calibration_samples} examples instead.') num_calibration_samples = new_num_calibration_samples if quantization_config.calibration_dataset_name_or_path is None: calibration_dataset = task_processor.load_default_dataset(only_keep_necessary_columns=True, load_smallest_split=load_smallest_split, num_samples=num_calibration_samples, shuffle=True, split=quantization_config.calibration_split) else: data_keys = {} if quantization_config.primary_key is not None: data_keys['primary'] = quantization_config.primary_key if quantization_config.secondary_key is not None: data_keys['secondary'] = quantization_config.secondary_key if quantization_config.question_key is not None: data_keys['question'] = quantization_config.question_key if quantization_config.context_key is not None: data_keys['context'] = quantization_config.context_key if quantization_config.image_key is not None: data_keys['image'] = quantization_config.image_key calibration_dataset = task_processor.load_dataset(quantization_config.calibration_dataset_name_or_path, data_keys=data_keys, only_keep_necessary_columns=True, load_smallest_split=load_smallest_split, 
num_samples=num_calibration_samples, shuffle=True, name=quantization_config.calibration_dataset_config_name, split=quantization_config.calibration_split) if batch_size > 1: columns_needed_by_all_signatures = set() for tf_function in signatures.values(): (args, kwargs) = tf_function.structured_input_signature columns_needed_by_all_signatures |= {input_.name for input_ in args} columns_needed_by_all_signatures |= {input_.name for input_ in kwargs.values()} columns_to_remove = set(calibration_dataset.column_names) - columns_needed_by_all_signatures calibration_dataset = calibration_dataset.remove_columns(columns_to_remove) def batching_function(examples): return {column_name: [examples[column_name]] for column_name in examples.keys()} calibration_dataset = calibration_dataset.map(batching_function, batched=True, batch_size=batch_size) calibration_dataset = calibration_dataset.with_format('tf') converter.representative_dataset = create_representative_dataset(signatures, calibration_dataset) if quantization_config.approach is QuantizationApproach.INT8: opsset = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] else: logger.warning('The latency with 8x16 quantization can be much slower than int8 only because it is currently an experimental feature, use this only if necessary.') opsset = [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8] if quantization_config.fallback_to_float: opsset.append(tf.lite.OpsSet.TFLITE_BUILTINS) converter.target_spec.supported_ops = opsset if quantization_config.inputs_dtype is not None: converter.inference_input_type = str_to_dtype[quantization_config.inputs_dtype] if quantization_config.outputs_dtype is not None: converter.inference_output_type = str_to_dtype[quantization_config.outputs_dtype] elif quantization_config.approach is QuantizationApproach.INT8_DYNAMIC: converter.optimizations = [tf.lite.Optimize.DEFAULT] elif quantization_config.approach is QuantizationApproach.FP16: converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.target_spec.supported_types = [tf.float16] def export(model: 'TFPreTrainedModel', config: 'TFLiteConfig', output: Path, task: Optional[str]=None, preprocessor: Optional[Preprocessor]=None, quantization_config: Optional['TFLiteQuantizationConfig']=None) -> Tuple[List[str], List[str]]: if not is_tf_available(): raise ImportError('Cannot convert because TensorFlow is not installed. 
Please install TensorFlow first.') import tensorflow as tf output.parent.mkdir(parents=True, exist_ok=True) logger.info(f'Using TensorFlow: {tf.__version__}') model.config.return_dict = True if config.values_override is not None: logger.info(f'Overriding {len(config.values_override)} configuration item(s)') for (override_config_key, override_config_value) in config.values_override.items(): logger.info(f'\t- {override_config_key} -> {override_config_value}') setattr(model.config, override_config_key, override_config_value) signatures = config.model_to_signatures(model) with TemporaryDirectory() as tmp_dir_name: model.save(tmp_dir_name, signatures=signatures) converter = tf.lite.TFLiteConverter.from_saved_model(tmp_dir_name) if quantization_config is not None: prepare_converter_for_quantization(model, config, preprocessor, signatures, quantization_config, converter, task=task) tflite_model = converter.convert() with open(output, 'wb') as fp: fp.write(tflite_model) return (config.inputs, config.outputs) # File: optimum-main/optimum/exporters/tflite/model_configs.py """""" from typing import List from ...utils.normalized_config import NormalizedConfigManager from .base import QuantizationApproach from .config import TextEncoderTFliteConfig, VisionTFLiteConfig class BertTFLiteConfig(TextEncoderTFliteConfig): NORMALIZED_CONFIG_CLASS = NormalizedConfigManager.get_normalized_config_class('bert') SUPPORTED_QUANTIZATION_APPROACHES = (QuantizationApproach.INT8_DYNAMIC, QuantizationApproach.INT8, QuantizationApproach.FP16) @property def inputs(self) -> List[str]: return ['input_ids', 'attention_mask', 'token_type_ids'] class AlbertTFLiteConfig(BertTFLiteConfig): pass class ConvBertTFLiteConfig(BertTFLiteConfig): pass class ElectraTFLiteConfig(BertTFLiteConfig): pass class RoFormerTFLiteConfig(BertTFLiteConfig): pass class MobileBertTFLiteConfig(BertTFLiteConfig): pass class XLMTFLiteConfig(BertTFLiteConfig): pass class DistilBertTFLiteConfig(BertTFLiteConfig): @property def inputs(self) -> List[str]: return ['input_ids', 'attention_mask'] class MPNetTFLiteConfig(DistilBertTFLiteConfig): pass class RobertaTFLiteConfig(DistilBertTFLiteConfig): pass class CamembertTFLiteConfig(DistilBertTFLiteConfig): pass class FlaubertTFLiteConfig(BertTFLiteConfig): pass class XLMRobertaTFLiteConfig(DistilBertTFLiteConfig): SUPPORTED_QUANTIZATION_APPROACHES = {'default': BertTFLiteConfig.SUPPORTED_QUANTIZATION_APPROACHES, 'question-answering': (QuantizationApproach.INT8_DYNAMIC, QuantizationApproach.FP16)} class DebertaTFLiteConfig(BertTFLiteConfig): SUPPORTED_QUANTIZATION_APPROACHES = (QuantizationApproach.INT8_DYNAMIC, QuantizationApproach.FP16) @property def inputs(self) -> List[str]: common_inputs = super().inputs if self._config.type_vocab_size == 0: common_inputs.pop(-1) return common_inputs class DebertaV2TFLiteConfig(DebertaTFLiteConfig): pass class ResNetTFLiteConfig(VisionTFLiteConfig): NORMALIZED_CONFIG_CLASS = NormalizedConfigManager.get_normalized_config_class('resnet') @property def inputs(self) -> List[str]: return ['pixel_values'] # File: optimum-main/optimum/exporters/utils.py """""" import copy from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import torch from packaging import version from transformers.models.speecht5.modeling_speecht5 import SpeechT5HifiGan from transformers.utils import is_tf_available, is_torch_available from ..utils import DIFFUSERS_MINIMUM_VERSION, check_if_diffusers_greater, is_diffusers_available, logging from ..utils.import_utils import 
_diffusers_version from .tasks import TasksManager logger = logging.get_logger() if is_diffusers_available(): if not check_if_diffusers_greater(DIFFUSERS_MINIMUM_VERSION.base_version): raise ImportError(f'We found an older version of diffusers {_diffusers_version} but we require diffusers to be >= {DIFFUSERS_MINIMUM_VERSION}. Please update diffusers by running `pip install --upgrade diffusers`') from diffusers import DiffusionPipeline, LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0, AttnProcessor, AttnProcessor2_0, LoRAAttnProcessor, LoRAAttnProcessor2_0 if TYPE_CHECKING: from .base import ExportConfig if is_torch_available(): from transformers.modeling_utils import PreTrainedModel if is_tf_available(): from transformers.modeling_tf_utils import TFPreTrainedModel if is_diffusers_available(): from diffusers import DiffusionPipeline, ModelMixin ENCODER_NAME = 'encoder_model' DECODER_NAME = 'decoder_model' DECODER_WITH_PAST_NAME = 'decoder_with_past_model' DECODER_MERGED_NAME = 'decoder_model_merged' def _get_submodels_for_export_diffusion(pipeline: 'DiffusionPipeline') -> Dict[str, Union['PreTrainedModel', 'ModelMixin']]: is_stable_diffusion = isinstance(pipeline, (StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline)) is_stable_diffusion_xl = isinstance(pipeline, (StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline)) is_latent_consistency_model = isinstance(pipeline, (LatentConsistencyModelPipeline, LatentConsistencyModelImg2ImgPipeline)) if is_stable_diffusion_xl: projection_dim = pipeline.text_encoder_2.config.projection_dim elif is_stable_diffusion: projection_dim = pipeline.text_encoder.config.projection_dim elif is_latent_consistency_model: projection_dim = pipeline.text_encoder.config.projection_dim else: raise ValueError(f'The export of a DiffusionPipeline model with the class name {pipeline.__class__.__name__} is currently not supported in Optimum. 
Please open an issue or submit a PR to add the support.') models_for_export = {} text_encoder = getattr(pipeline, 'text_encoder', None) if text_encoder is not None: if is_stable_diffusion_xl: text_encoder.config.output_hidden_states = True models_for_export['text_encoder'] = text_encoder is_torch_greater_or_equal_than_2_1 = version.parse(torch.__version__) >= version.parse('2.1.0') if not is_torch_greater_or_equal_than_2_1: pipeline.unet.set_attn_processor(AttnProcessor()) pipeline.unet.config.text_encoder_projection_dim = projection_dim pipeline.unet.config.requires_aesthetics_score = getattr(pipeline.config, 'requires_aesthetics_score', False) models_for_export['unet'] = pipeline.unet vae_encoder = copy.deepcopy(pipeline.vae) if not is_torch_greater_or_equal_than_2_1: vae_encoder = override_diffusers_2_0_attn_processors(vae_encoder) vae_encoder.forward = lambda sample: {'latent_sample': vae_encoder.encode(x=sample)['latent_dist'].sample()} models_for_export['vae_encoder'] = vae_encoder vae_decoder = copy.deepcopy(pipeline.vae) if not is_torch_greater_or_equal_than_2_1: vae_decoder = override_diffusers_2_0_attn_processors(vae_decoder) vae_decoder.forward = lambda latent_sample: vae_decoder.decode(z=latent_sample) models_for_export['vae_decoder'] = vae_decoder text_encoder_2 = getattr(pipeline, 'text_encoder_2', None) if text_encoder_2 is not None: text_encoder_2.config.output_hidden_states = True text_encoder_2.text_model.config.output_hidden_states = True models_for_export['text_encoder_2'] = text_encoder_2 return models_for_export def _get_submodels_for_export_decoder(model: Union['PreTrainedModel', 'TFPreTrainedModel'], use_past: bool, legacy: bool=False) -> Dict[str, Union['PreTrainedModel', 'TFPreTrainedModel']]: models_for_export = {DECODER_NAME if legacy else 'model': model} if legacy and use_past: models_for_export[DECODER_WITH_PAST_NAME] = model return models_for_export def _get_submodels_for_export_encoder_decoder(model: Union['PreTrainedModel', 'TFPreTrainedModel'], use_past: bool) -> Dict[str, Union['PreTrainedModel', 'TFPreTrainedModel']]: models_for_export = {} encoder_model = model.get_encoder() models_for_export[ENCODER_NAME] = encoder_model models_for_export[DECODER_NAME] = model if use_past: models_for_export[DECODER_WITH_PAST_NAME] = model return models_for_export def get_encoder_decoder_models_for_export(model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: 'ExportConfig') -> Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel'], 'ExportConfig']]: models_for_export = _get_submodels_for_export_encoder_decoder(model, use_past=config.use_past) encoder_export_config = config.with_behavior('encoder') models_for_export[ENCODER_NAME] = (models_for_export[ENCODER_NAME], encoder_export_config) decoder_export_config = config.with_behavior('decoder', use_past=config.use_past, use_past_in_inputs=False) models_for_export[DECODER_NAME] = (models_for_export[DECODER_NAME], decoder_export_config) if config.use_past: decoder_export_config_with_past = config.with_behavior('decoder', use_past=True, use_past_in_inputs=True) models_for_export[DECODER_WITH_PAST_NAME] = (models_for_export[DECODER_WITH_PAST_NAME], decoder_export_config_with_past) return models_for_export def get_decoder_models_for_export(model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: 'ExportConfig', legacy: bool=False) -> Dict[str, Tuple[Union['PreTrainedModel', 'TFPreTrainedModel'], 'ExportConfig']]: models_for_export = _get_submodels_for_export_decoder(model, use_past=config.use_past, 
legacy=legacy) export_kwargs = {'task': config.task, 'float_dtype': config.float_dtype, 'int_dtype': config.int_dtype, 'legacy': legacy} if legacy: export_config = config.__class__(model.config, use_past=config.use_past, use_past_in_inputs=False, **export_kwargs) models_for_export[DECODER_NAME] = (models_for_export[DECODER_NAME], export_config) if config.use_past: export_config_with_past = config.__class__(model.config, use_past=True, use_past_in_inputs=True, **export_kwargs) models_for_export[DECODER_WITH_PAST_NAME] = (models_for_export[DECODER_WITH_PAST_NAME], export_config_with_past) else: export_config = config.__class__(model.config, use_past=config.use_past, use_past_in_inputs=config.use_past, **export_kwargs) models_for_export['model'] = (models_for_export['model'], export_config) return models_for_export def get_diffusion_models_for_export(pipeline: 'DiffusionPipeline', int_dtype: str='int64', float_dtype: str='fp32', exporter: str='onnx') -> Dict[str, Tuple[Union['PreTrainedModel', 'ModelMixin'], 'ExportConfig']]: models_for_export = _get_submodels_for_export_diffusion(pipeline) if 'text_encoder' in models_for_export: text_encoder_config_constructor = TasksManager.get_exporter_config_constructor(model=pipeline.text_encoder, exporter=exporter, library_name='diffusers', task='feature-extraction') text_encoder_export_config = text_encoder_config_constructor(pipeline.text_encoder.config, int_dtype=int_dtype, float_dtype=float_dtype) models_for_export['text_encoder'] = (models_for_export['text_encoder'], text_encoder_export_config) export_config_constructor = TasksManager.get_exporter_config_constructor(model=pipeline.unet, exporter=exporter, library_name='diffusers', task='semantic-segmentation', model_type='unet') unet_export_config = export_config_constructor(pipeline.unet.config, int_dtype=int_dtype, float_dtype=float_dtype) models_for_export['unet'] = (models_for_export['unet'], unet_export_config) vae_encoder = models_for_export['vae_encoder'] vae_config_constructor = TasksManager.get_exporter_config_constructor(model=vae_encoder, exporter=exporter, library_name='diffusers', task='semantic-segmentation', model_type='vae-encoder') vae_export_config = vae_config_constructor(vae_encoder.config, int_dtype=int_dtype, float_dtype=float_dtype) models_for_export['vae_encoder'] = (vae_encoder, vae_export_config) vae_decoder = models_for_export['vae_decoder'] vae_config_constructor = TasksManager.get_exporter_config_constructor(model=vae_decoder, exporter=exporter, library_name='diffusers', task='semantic-segmentation', model_type='vae-decoder') vae_export_config = vae_config_constructor(vae_decoder.config, int_dtype=int_dtype, float_dtype=float_dtype) models_for_export['vae_decoder'] = (vae_decoder, vae_export_config) if 'text_encoder_2' in models_for_export: export_config_constructor = TasksManager.get_exporter_config_constructor(model=pipeline.text_encoder_2, exporter=exporter, library_name='diffusers', task='feature-extraction', model_type='clip-text-with-projection') export_config = export_config_constructor(pipeline.text_encoder_2.config, int_dtype=int_dtype, float_dtype=float_dtype) models_for_export['text_encoder_2'] = (models_for_export['text_encoder_2'], export_config) return models_for_export def get_musicgen_models_for_export(model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: 'ExportConfig'): models_for_export = {'text_encoder': model.text_encoder, 'encodec_decode': model.audio_encoder, DECODER_NAME: model, DECODER_WITH_PAST_NAME: model, 'build_delay_pattern_mask': 
model.decoder} text_encoder_config = config.__class__(model.config, task=config.task, legacy=False, model_part='text_encoder', variant=config.variant) models_for_export['text_encoder'] = (models_for_export['text_encoder'], text_encoder_config) audio_encoder_config = config.__class__(model.config, task=config.task, legacy=False, model_part='encodec_decode', variant=config.variant) models_for_export['encodec_decode'] = (models_for_export['encodec_decode'], audio_encoder_config) use_past = 'with-past' in config.variant decoder_export_config = config.with_behavior('decoder', use_past=use_past, use_past_in_inputs=False) decoder_export_config.model_part = 'decoder' models_for_export[DECODER_NAME] = (models_for_export[DECODER_NAME], decoder_export_config) if 'with-past' in config.variant: decoder_export_config_with_past = config.with_behavior('decoder', use_past=True, use_past_in_inputs=True) decoder_export_config_with_past.model_part = 'decoder' models_for_export[DECODER_WITH_PAST_NAME] = (models_for_export[DECODER_WITH_PAST_NAME], decoder_export_config_with_past) build_delay_pattern_mask_config = config.__class__(model.config, task=config.task, legacy=False, model_part='build_delay_pattern_mask', variant=config.variant) models_for_export['build_delay_pattern_mask'] = (models_for_export['build_delay_pattern_mask'], build_delay_pattern_mask_config) return models_for_export def _get_submodels_for_export_sam(model, variant): models_for_export = {} if variant == 'monolith': models_for_export['model'] = model else: models_for_export['vision_encoder'] = model models_for_export['prompt_encoder_mask_decoder'] = model return models_for_export def get_sam_models_for_export(model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: 'ExportConfig'): models_for_export = _get_submodels_for_export_sam(model, config.variant) if config.variant == 'monolith': export_config = config.__class__(model.config, task=config.task, legacy=config.legacy) models_for_export['model'] = (models_for_export['model'], export_config) else: vision_encoder_export_config = config.__class__(model.config, task=config.task, variant=config.variant, vision_encoder=True, legacy=config.legacy) prompt_encoder_mask_decoder_export_config = config.__class__(model.config, task=config.task, variant=config.variant, vision_encoder=False, legacy=config.legacy) models_for_export['vision_encoder'] = (models_for_export['vision_encoder'], vision_encoder_export_config) models_for_export['prompt_encoder_mask_decoder'] = (models_for_export['prompt_encoder_mask_decoder'], prompt_encoder_mask_decoder_export_config) return models_for_export def get_speecht5_models_for_export(model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: 'ExportConfig', model_kwargs: Optional[Dict]): if model_kwargs is None or 'vocoder' not in model_kwargs: raise ValueError('The export of SpeechT5 requires a vocoder. 
Please pass `--model-kwargs \'{"vocoder": "vocoder_model_name_or_path"}\'` from the command line, or `model_kwargs={"vocoder": "vocoder_model_name_or_path"}` if calling main_export.') models_for_export = {} models_for_export['encoder_model'] = model models_for_export['decoder_model'] = model if config.variant == 'with-past': models_for_export['decoder_with_past_model'] = model vocoder = SpeechT5HifiGan.from_pretrained(model_kwargs['vocoder']).eval() model_kwargs['vocoder_model'] = vocoder models_for_export['decoder_postnet_and_vocoder'] = model encoder_export_config = config.with_behavior('encoder') use_past = config.variant == 'with-past' decoder_export_config = config.with_behavior('decoder', use_past=use_past, use_past_in_inputs=False) models_for_export[ENCODER_NAME] = (models_for_export[ENCODER_NAME], encoder_export_config) models_for_export[DECODER_NAME] = (models_for_export[DECODER_NAME], decoder_export_config) if config.variant == 'with-past': decoder_export_config_with_past = config.with_behavior('decoder', use_past=True, use_past_in_inputs=True) models_for_export[DECODER_WITH_PAST_NAME] = (models_for_export[DECODER_WITH_PAST_NAME], decoder_export_config_with_past) postnet_and_vocoder_export_config = config.__class__(config._config, task=config.task, int_dtype=config.int_dtype, float_dtype=config.float_dtype, use_past=use_past, use_past_in_inputs=False, behavior=config._behavior, preprocessors=config._preprocessors, is_postnet_and_vocoder=True, legacy=config.legacy) postnet_and_vocoder_export_config.variant = config.variant models_for_export['decoder_postnet_and_vocoder'] = (models_for_export['decoder_postnet_and_vocoder'], postnet_and_vocoder_export_config) return models_for_export def override_diffusers_2_0_attn_processors(model): for (_, submodule) in model.named_modules(): if isinstance(submodule, Attention): if isinstance(submodule.processor, AttnProcessor2_0): submodule.set_processor(AttnProcessor()) elif isinstance(submodule.processor, LoRAAttnProcessor2_0): lora_attn_processor = LoRAAttnProcessor(hidden_size=submodule.processor.hidden_size, cross_attention_dim=submodule.processor.cross_attention_dim, rank=submodule.processor.rank, network_alpha=submodule.processor.to_q_lora.network_alpha) lora_attn_processor.to_q_lora = copy.deepcopy(submodule.processor.to_q_lora) lora_attn_processor.to_k_lora = copy.deepcopy(submodule.processor.to_k_lora) lora_attn_processor.to_v_lora = copy.deepcopy(submodule.processor.to_v_lora) lora_attn_processor.to_out_lora = copy.deepcopy(submodule.processor.to_out_lora) submodule.set_processor(lora_attn_processor) elif isinstance(submodule.processor, AttnAddedKVProcessor2_0): submodule.set_processor(AttnAddedKVProcessor()) return model def _get_submodels_and_export_configs(model: Union['PreTrainedModel', 'TFPreTrainedModel', 'DiffusionPipeline'], task: str, monolith: bool, custom_export_configs: Dict, custom_architecture: bool, _variant: str, library_name: str, int_dtype: str='int64', float_dtype: str='fp32', fn_get_submodels: Optional[Callable]=None, preprocessors: Optional[List[Any]]=None, legacy: bool=False, model_kwargs: Optional[Dict]=None, exporter: str='onnx'): if not custom_architecture: if library_name == 'diffusers': export_config = None models_and_export_configs = get_diffusion_models_for_export(model, int_dtype=int_dtype, float_dtype=float_dtype, exporter=exporter) else: export_config_constructor = TasksManager.get_exporter_config_constructor(model=model, exporter=exporter, task=task, library_name=library_name) export_config = 
export_config_constructor(model.config, int_dtype=int_dtype, float_dtype=float_dtype, preprocessors=preprocessors, legacy=legacy) export_config.variant = _variant all_variants = '\n'.join([f' - {name}: {description}' for (name, description) in export_config.VARIANTS.items()]) logger.info(f'Using the export variant {export_config.variant}. Available variants are:\n{all_variants}') if model.config.is_encoder_decoder and task.startswith(TasksManager._ENCODER_DECODER_TASKS) and (not monolith): models_and_export_configs = get_encoder_decoder_models_for_export(model, export_config) elif task.startswith('text-generation') and (not monolith): models_and_export_configs = get_decoder_models_for_export(model, export_config, legacy=legacy) elif model.config.model_type == 'sam': models_and_export_configs = get_sam_models_for_export(model, export_config) elif model.config.model_type == 'speecht5': models_and_export_configs = get_speecht5_models_for_export(model, export_config, model_kwargs) elif model.config.model_type == 'musicgen': models_and_export_configs = get_musicgen_models_for_export(model, export_config) else: models_and_export_configs = {'model': (model, export_config)} for (key, custom_export_config) in custom_export_configs.items(): models_and_export_configs[key] = (models_and_export_configs[key][0], custom_export_config) else: export_config = None submodels_for_export = None models_and_export_configs = {} if fn_get_submodels is not None: submodels_for_export = fn_get_submodels(model) elif library_name == 'diffusers': submodels_for_export = _get_submodels_for_export_diffusion(model) elif model.config.is_encoder_decoder and task.startswith(TasksManager._ENCODER_DECODER_TASKS) and (not monolith): submodels_for_export = _get_submodels_for_export_encoder_decoder(model, use_past=task.endswith('-with-past')) elif task.startswith('text-generation') and (not monolith): submodels_for_export = _get_submodels_for_export_decoder(model, use_past=task.endswith('-with-past')) else: submodels_for_export = {'model': model} if submodels_for_export.keys() != custom_export_configs.keys(): logger.error(f"{exporter.upper()} custom configs for: {', '.join(custom_export_configs.keys())}") logger.error(f"Submodels to export: {', '.join(submodels_for_export.keys())}") raise ValueError(f'Trying to export a custom model, but could not find as many custom {exporter.upper()} configs as the number of submodels to export. 
Please specify the fn_get_submodels argument, which should return a dictionary of submodels with as many items as the provided custom_export_configs dictionary.') for (key, custom_export_config) in custom_export_configs.items(): models_and_export_configs[key] = (submodels_for_export[key], custom_export_config) if export_config is None: export_config = next(iter(models_and_export_configs.values()))[1] return (export_config, models_and_export_configs) # File: optimum-main/optimum/fx/optimization/__init__.py from .transformations import ChangeTrueDivToMulByInverse, FuseBatchNorm1dInLinear, FuseBatchNorm2dInConv2d, FuseBiasInLinear, MergeLinears, ReversibleTransformation, Transformation, compose # File: optimum-main/optimum/fx/optimization/transformations.py import collections import copy import functools import itertools import operator import warnings from abc import ABC, abstractmethod from typing import List import torch from torch.fx import GraphModule, Node from transformers.file_utils import add_end_docstrings try: from transformers.utils.fx import _gen_constructor_wrapper except ImportError: from transformers.utils.fx import gen_constructor_wrapper def _gen_constructor_wrapper(*args, **kwargs): (wrapper, target) = gen_constructor_wrapper(*args, **kwargs) def wrapper_with_forced_tracing(*_args, **_kwargs): import torch.fx._symbolic_trace original_flag = torch.fx._symbolic_trace._is_fx_tracing_flag torch.fx._symbolic_trace._is_fx_tracing_flag = True out = wrapper(*_args, **_kwargs) torch.fx._symbolic_trace._is_fx_tracing_flag = original_flag return out return (wrapper_with_forced_tracing, target) _ATTRIBUTES_DOCSTRING = '\nAttributes:\n preserves_computation (`bool`, defaults to `False`):\n Whether the transformation preserves the graph computation or not. If `True`, the original and the\n transformed graph should produce the same outputs.\n' _EXAMPLE_DOCSTRING = '\n```python\n>>> from transformers import BertModel\n>>> from transformers.utils.fx import symbolic_trace\n>>> from optimum.fx.optimization import {class_name}\n\n>>> model = BertModel.from_pretrained("bert-base-uncased")\n>>> traced = symbolic_trace(\n... model,\n... input_names=["input_ids", "attention_mask", "token_type_ids"],\n... )\n>>> transformation = {class_name}()\n>>> transformed_model = transformation(traced)\n```\n' _REVERSIBLE_EXAMPLE_DOCSTRING = '\n```python\n>>> from transformers import BertModel\n>>> from transformers.utils.fx import symbolic_trace\n>>> from optimum.fx.optimization import {class_name}\n\n>>> model = BertModel.from_pretrained("bert-base-uncased")\n>>> traced = symbolic_trace(\n... model,\n... input_names=["input_ids", "attention_mask", "token_type_ids"],\n... 
)\n>>> transformation = {class_name}()\n>>> transformed_model = transformation(traced)\n>>> restored_model = transformation(transformed_model, reverse=True)\n```\n' def add_docstring(add_example=True): def wrapper(class_): example_docstring = _EXAMPLE_DOCSTRING if 'ReversibleTransformation' in (cls.__name__ for cls in class_.mro()): example_docstring = _REVERSIBLE_EXAMPLE_DOCSTRING new_doc = [f'{class_.__doc__}', f'{_ATTRIBUTES_DOCSTRING}'] if add_example: new_doc.append('Example:') new_doc.append(f'\t{example_docstring.format(class_name=class_.__name__)}') class_.__doc__ = '\n'.join(new_doc) return class_ return wrapper @add_docstring(add_example=False) class Transformation(ABC): preserves_computation: bool = False @abstractmethod def transform(self, graph_module: 'GraphModule') -> 'GraphModule': raise NotImplementedError('The transform method needs to be implemented.') def __call__(self, graph_module: 'GraphModule', lint_and_recompile: bool=True) -> 'GraphModule': graph_module = self.transform(graph_module) if lint_and_recompile: graph_module.graph.lint() graph_module.recompile() return graph_module @property def signature(self): attributes_to_use_for_hashing = vars(self) attributes_to_use_for_hashing[''] = self.__class__ hash_str = '_'.join((f'{k}_{hash(v)}' for (k, v) in attributes_to_use_for_hashing.items())) return hash(hash_str) def mark_as_transformed(self, node: 'Node'): node_transformations = getattr(node, 'transformations', set()) node_transformations.add(self.signature) node.transformations = node_transformations def transformed(self, node: 'Node') -> bool: return self.signature in getattr(node, 'transformations', set()) def get_transformed_nodes(self, graph_module: 'GraphModule') -> List['Node']: return [node for node in graph_module.graph.nodes if self.transformed(node)] @add_docstring(add_example=False) class ReversibleTransformation(Transformation): @abstractmethod def reverse(self, graph_module: 'GraphModule') -> 'GraphModule': raise NotImplementedError('The reverse transform method needs to be implemented.') def __call__(self, graph_module: 'GraphModule', lint_and_recompile: bool=True, reverse: bool=False) -> 'GraphModule': func = self.transform if not reverse else self.reverse graph_module = func(graph_module) if lint_and_recompile: graph_module.graph.lint() graph_module.recompile() return graph_module def mark_as_restored(self, node: 'Node'): node_transformations = getattr(node, 'transformations', set()) if self.signature not in node_transformations: raise ValueError('The node was not transformed by this transformation.') node_transformations.remove(self.signature) @add_docstring() class MergeLinears(ReversibleTransformation): preserves_computation = True @staticmethod def _get_bias(linear: torch.nn.Linear) -> torch.Tensor: if linear.bias is not None: return linear.bias return torch.zeros(linear.out_features, dtype=linear.weight.dtype).to(linear.weight.device) @staticmethod def _get_linear_module_name(linear_node): return linear_node.target.split('.')[-1] @staticmethod def _linear_node_to_module_and_attribute_name(graph_module, linear_node_target): names = linear_node_target.split('.') mod = graph_module if len(names) > 1: for name in names[:-1]: mod = getattr(mod, name) return (mod, names[-1]) def _merge_linears(self, graph_module: 'GraphModule', input_node: 'Node', linear_nodes: List['Node'], linears: List[torch.nn.Linear]): in_features = linears[0].in_features out_features = [linear.out_features for linear in linears] total_out_features = sum(out_features) use_bias = 
any((hasattr(linear, 'bias') for linear in linears)) if use_bias and (not all((hasattr(linear, 'bias') for linear in linears))): warnings.warn('Not all the linear layers that are merged contain a bias, but some do. By merging, this is equivalent to adding a bias to the layers missing one.') merged_linear = torch.nn.Linear(in_features, total_out_features, bias=use_bias) dtype = linears[0].weight.dtype device = linears[0].weight.device with torch.no_grad(): new_weight = torch.cat([linear.weight for linear in linears], dim=0).to(dtype=dtype, device=device) merged_linear.weight = torch.nn.Parameter(new_weight) if use_bias: new_bias = torch.cat([MergeLinears._get_bias(linear) for linear in linears], dim=0).to(dtype=dtype, device=device) merged_linear.bias = torch.nn.Parameter(new_bias) linear_module_names = [MergeLinears._get_linear_module_name(node) for node in linear_nodes] merged_linear_name = '-'.join(linear_module_names + ['merged']) fully_qualified_parent_name = linear_nodes[0].target.rsplit('.', maxsplit=1)[0] parent_module = graph_module.get_submodule(fully_qualified_parent_name) parent_module.add_module(merged_linear_name, merged_linear) for linear_node in linear_nodes: (mod, name) = MergeLinears._linear_node_to_module_and_attribute_name(graph_module, linear_node.target) delattr(mod, name) graph = graph_module.graph with graph.inserting_before(linear_nodes[0]): fully_qualified_merged_linear_name = '.'.join([fully_qualified_parent_name, merged_linear_name]) merged_linear_node = graph.call_module(fully_qualified_merged_linear_name, args=(input_node,)) self.mark_as_transformed(merged_linear_node) merged_linear_node.linear_node_targets = [n.target for n in linear_nodes] accum_out_features = list(itertools.accumulate([0] + out_features)) for (idx, node) in enumerate(linear_nodes): node.op = 'call_function' node.target = operator.getitem slice_to_get = slice(accum_out_features[idx], accum_out_features[idx + 1]) node.args = (merged_linear_node, (Ellipsis, slice_to_get)) @staticmethod def _unmerge_linears(graph_module: 'GraphModule', merged_linear_node: 'Node', merged_linear: torch.nn.Linear): linear_node_targets = merged_linear_node.linear_node_targets output_nodes = sorted(merged_linear_node.users, key=lambda node: node.args[1][1].start) in_features = merged_linear.in_features out_features = [] for node in output_nodes: slice_to_get = node.args[1][1] out_features.append(slice_to_get.stop - slice_to_get.start) linears = [torch.nn.Linear(in_features, out_feat, bias=hasattr(merged_linear, 'bias'), device=merged_linear.weight.device, dtype=merged_linear.weight.dtype) for out_feat in out_features] for (target, node, linear) in zip(linear_node_targets, output_nodes, linears): with torch.no_grad(): slice_to_get = node.args[1][1] linear.weight = torch.nn.Parameter(merged_linear.weight[slice_to_get.start:slice_to_get.stop]) if hasattr(merged_linear, 'bias'): linear.bias = torch.nn.Parameter(merged_linear.bias[slice_to_get.start:slice_to_get.stop]) (parent_module, name) = MergeLinears._linear_node_to_module_and_attribute_name(graph_module, target) parent_module.add_module(name, linear) node.op = 'call_module' node.target = target node.args = (merged_linear_node.args[0],) (parent_module, merged_linear_name) = MergeLinears._linear_node_to_module_and_attribute_name(graph_module, merged_linear_node.target) delattr(parent_module, merged_linear_name) graph_module.graph.erase_node(merged_linear_node) def transform(self, graph_module: 'GraphModule') -> 'GraphModule': candidates = collections.defaultdict(list) 
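# The loop below walks the traced graph, collects every `call_module` node whose target resolves to
# a `torch.nn.Linear`, and groups those nodes by the node feeding them; only groups of two or more
# linears that share the same input (for example the query/key/value projections of an attention
# block) are then merged into a single wider `nn.Linear` by `_merge_linears`.
# Minimal usage sketch, mirroring the doctest-style examples already used in this module
# ("bert-base-uncased" is simply the checkpoint those examples use):
# >>> from transformers import BertModel
# >>> from transformers.utils.fx import symbolic_trace
# >>> from optimum.fx.optimization import MergeLinears
# >>> model = BertModel.from_pretrained("bert-base-uncased")
# >>> traced = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"])
# >>> transformation = MergeLinears()
# >>> transformed_model = transformation(traced)
# >>> restored_model = transformation(transformed_model, reverse=True)  # reversible transformation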
for node in graph_module.graph.nodes: if node.op == 'call_module': mod = graph_module.get_submodule(node.target) if isinstance(mod, torch.nn.Linear): input_node = node.args[0] candidates[input_node].append((node, mod)) candidates = {k: v for (k, v) in candidates.items() if len(v) > 1} for (input_node, t) in candidates.items(): (linear_nodes, linears) = list(zip(*t)) self._merge_linears(graph_module, input_node, linear_nodes, linears) return graph_module def reverse(self, graph_module: 'GraphModule') -> 'GraphModule': for node in self.get_transformed_nodes(graph_module): self._unmerge_linears(graph_module, node, graph_module.get_submodule(node.target)) return graph_module @add_docstring() class FuseBiasInLinear(ReversibleTransformation): preserves_computation = True def transform(self, graph_module: 'GraphModule') -> 'GraphModule': torch_ones = _gen_constructor_wrapper(torch.ones)[0] def insert_concat(linear_input): shape = linear_input.shape[:-1] + (1,) return torch.cat([linear_input, torch_ones(shape, device=linear_input.device)], dim=-1) tracer = torch.fx.proxy.GraphAppendingTracer(graph_module.graph) for node in graph_module.graph.nodes: if node.op == 'call_module': module = graph_module.get_submodule(node.target) if isinstance(module, torch.nn.Linear) and module.bias is not None: with graph_module.graph.inserting_before(node): n = node.args[0] node.nodes_to_ignore = set() while n is not node: node.nodes_to_ignore.add(n) n = n.next linear_input_proxy = torch.fx.Proxy(node.args[0], tracer) output_proxy = insert_concat(linear_input_proxy) node.start_node = linear_input_proxy.node node.end_node = output_proxy.node node.args = (output_proxy.node,) self.mark_as_transformed(node) new_weight = torch.nn.Parameter(torch.cat([module.weight, module.bias[:, None]], dim=1)) module.weight = new_weight module.bias = None return graph_module def reverse(self, graph_module: 'GraphModule') -> 'GraphModule': for node in self.get_transformed_nodes(graph_module): node.args = (node.start_node,) n = node.end_node while n is not node.start_node: if n not in node.nodes_to_ignore: graph_module.graph.erase_node(n) n = n.prev self.mark_as_restored(node) module = graph_module.get_submodule(node.target) new_weight = torch.nn.Parameter(module.weight[:, :-1]) new_bias = torch.nn.Parameter(module.weight[:, -1].squeeze()) module.weight = new_weight module.bias = new_bias return graph_module @add_docstring() class ChangeTrueDivToMulByInverse(ReversibleTransformation): preserves_computation = True def transform(self, graph_module: 'GraphModule') -> 'GraphModule': graph = graph_module.graph for node in graph.nodes: if node.op == 'call_function' and node.target == operator.truediv: (x, y) = node.args if not isinstance(y, torch.fx.Node): node.target = operator.mul node.args = (x, 1 / y) self.mark_as_transformed(node) return graph_module def reverse(self, graph_module: 'GraphModule') -> 'GraphModule': for node in self.get_transformed_nodes(graph_module): node.target = operator.truediv (x, y) = node.args node.args = (x, 1 / y) self.mark_as_restored(node) return graph_module @add_end_docstrings(_ATTRIBUTES_DOCSTRING) class FuseBatchNorm2dInConv2d(Transformation): preserves_computation = True def transform(self, graph_module: 'GraphModule') -> 'GraphModule': for node in graph_module.graph.nodes: if node.op == 'call_module' and node.args[0].op == 'call_module': if type(graph_module.get_submodule(node.target)) is torch.nn.BatchNorm2d and type(graph_module.get_submodule(node.args[0].target)) is torch.nn.Conv2d: if 
len(node.args[0].users) > 1: continue fused_conv = self.fuse(conv2d=graph_module.get_submodule(node.args[0].target), bn2d=graph_module.get_submodule(node.target)) (parent_name, _, name) = node.args[0].target.rpartition('.') parent_module = graph_module.get_submodule(parent_name) setattr(parent_module, name, fused_conv) (parent_name, _, name) = node.target.rpartition('.') parent_module = graph_module.get_submodule(parent_name) delattr(parent_module, name) node.replace_all_uses_with(node.args[0]) graph_module.graph.erase_node(node) return graph_module def fuse(self, conv2d: torch.nn.Conv2d, bn2d: torch.nn.BatchNorm2d): conv_b = conv2d.bias if conv2d.bias is not None else torch.zeros_like(bn2d.running_mean) bn_w = bn2d.weight if bn2d.weight is not None else torch.ones_like(bn2d.running_mean) bn_b = bn2d.bias if bn2d.bias is not None else torch.ones_like(bn2d.running_mean) bn_var_rsqrt = torch.rsqrt(bn2d.running_var + bn2d.eps) conv2d.weight = torch.nn.Parameter(conv2d.weight * (bn_w * bn_var_rsqrt).reshape([-1] + [1] * (len(conv2d.weight.shape) - 1))) conv2d.bias = torch.nn.Parameter(conv_b - bn2d.running_mean * bn_var_rsqrt * bn_w + bn_b) return conv2d @add_end_docstrings(_ATTRIBUTES_DOCSTRING) class FuseBatchNorm1dInLinear(Transformation): preserves_computation = True def transform(self, graph_module: 'GraphModule') -> 'GraphModule': for node in graph_module.graph.nodes: if node.op == 'call_module' and node.args[0].op == 'call_module': if type(graph_module.get_submodule(node.target)) is torch.nn.BatchNorm1d and type(graph_module.get_submodule(node.args[0].target)) is torch.nn.Linear: if len(node.args[0].users) > 1: continue candidate_linear = graph_module.get_submodule(node.args[0].target) candidate_batchnorm1d = graph_module.get_submodule(node.target) if candidate_linear.weight.shape[0] == candidate_batchnorm1d.weight.shape[0]: fused_linear = self.fuse(linear=candidate_linear, bn1d=candidate_batchnorm1d, bn1d_before=False) (parent_name, _, name) = node.args[0].target.rpartition('.') parent_module = graph_module.get_submodule(parent_name) setattr(parent_module, name, fused_linear) (parent_name, _, name) = node.target.rpartition('.') parent_module = graph_module.get_submodule(parent_name) delattr(parent_module, name) node.replace_all_uses_with(node.args[0]) graph_module.graph.erase_node(node) elif type(graph_module.get_submodule(node.target)) is torch.nn.Linear and type(graph_module.get_submodule(node.args[0].target)) is torch.nn.BatchNorm1d: if len(node.args[0].users) > 1: continue candidate_linear = graph_module.get_submodule(node.target) candidate_batchnorm1d = graph_module.get_submodule(node.args[0].target) if candidate_batchnorm1d.weight.shape[0] == candidate_linear.weight.shape[1]: fused_linear = self.fuse(linear=candidate_linear, bn1d=candidate_batchnorm1d, bn1d_before=True) (parent_name, _, name) = node.target.rpartition('.') parent_module = graph_module.get_submodule(parent_name) setattr(parent_module, name, fused_linear) (parent_name, _, name) = node.args[0].target.rpartition('.') parent_module = graph_module.get_submodule(parent_name) delattr(parent_module, name) batchnorm_node = node.args[0] node.args[0].replace_all_uses_with(node.args[0].args[0]) graph_module.graph.erase_node(batchnorm_node) return graph_module def fuse(self, linear: torch.nn.Linear, bn1d: torch.nn.BatchNorm1d, bn1d_before: bool): linear_b = linear.bias if linear.bias is not None else torch.zeros_like(bn1d.running_mean) bn_w = bn1d.weight if bn1d.weight is not None else torch.ones_like(bn1d.running_mean) bn_b = 
bn1d.bias if bn1d.bias is not None else torch.ones_like(bn1d.running_mean) bn_var_rsqrt = torch.rsqrt(bn1d.running_var + bn1d.eps) if bn1d_before: linear.bias = torch.nn.Parameter(linear.weight @ (-bn_w * bn1d.running_mean * bn_var_rsqrt + bn_b) + linear_b) linear.weight = torch.nn.Parameter(linear.weight * (bn_w * bn_var_rsqrt)[None, :]) else: linear.bias = torch.nn.Parameter((linear_b - bn1d.running_mean) * bn_var_rsqrt * bn_w + bn_b) linear.weight = torch.nn.Parameter(linear.weight * (bn_w * bn_var_rsqrt)[:, None]) return linear class DeepCopy(ReversibleTransformation): preserves_computation = True def transform(self, graph_module: 'GraphModule') -> 'GraphModule': clone = copy.deepcopy(graph_module) for (n1, n2) in zip(graph_module.graph.nodes, clone.graph.nodes): if hasattr(n1, 'transformations'): n2.transformations = n1.transformations return clone def reverse(self, graph_module: 'GraphModule') -> 'GraphModule': return self.transform(graph_module) class LintAndRecompile(ReversibleTransformation): preserves_computation = True def transform(self, graph_module: 'GraphModule') -> 'GraphModule': graph_module.graph.lint() graph_module.recompile() return graph_module def reverse(self, graph_module: 'GraphModule') -> 'GraphModule': return self.transform(graph_module) def compose(*args: Transformation, inplace: bool=True) -> Transformation: transformations = list(reversed(args)) composition_preserves_computation = all((t.preserves_computation for t in transformations)) composition_is_reversible = all((isinstance(t, ReversibleTransformation) for t in transformations)) if not inplace: transformations.append(DeepCopy()) if not composition_is_reversible: def reduce_fn(f, g): def composition(graph_module, lint_and_recompile=False): return f(g(graph_module, lint_and_recompile=lint_and_recompile)) return composition class ComposeTransformation(Transformation): preserves_computation = composition_preserves_computation _composition = functools.reduce(reduce_fn, transformations) def transform(self, graph_module): return ComposeTransformation._composition(graph_module) else: def make_reduce_fn(reverse): def reduce_fn(f, g): def composition(graph_module, lint_and_recompile=False, reverse=reverse): return f(g(graph_module, lint_and_recompile=lint_and_recompile, reverse=reverse), lint_and_recompile=lint_and_recompile, reverse=reverse) return composition return reduce_fn class ComposeTransformation(ReversibleTransformation): preserves_computation = composition_preserves_computation _composition = functools.reduce(make_reduce_fn(False), transformations) _reverse_composition = functools.reduce(make_reduce_fn(True), reversed(transformations)) def transform(self, graph_module): return ComposeTransformation._composition(graph_module) def reverse(self, graph_module): return ComposeTransformation._reverse_composition(graph_module) return ComposeTransformation() # File: optimum-main/optimum/fx/parallelization/api.py import importlib import os from functools import partial from typing import Callable, List import torch from torch.fx import GraphModule from transformers import AutoConfig from .core import Config, ParallelExecutionCtx from .passes import build_parallel_pass_pipeline from .utils import MetaAwareMethodsPatcher, download_model_from_hf, initialize_parameter_meta, move_model_to_device, try_collect_weight_map def parallelize_backend(graph_module: GraphModule, example_inputs: List[torch.Tensor], ctx: ParallelExecutionCtx, config: Config) -> GraphModule: ctx.example_inputs = example_inputs pass_pipeline = 
build_parallel_pass_pipeline() graph_module = pass_pipeline(graph_module=graph_module, ctx=ctx, config=config) ctx.compile_times += 1 ctx.last_optimized_graph_module = graph_module return graph_module def parallelize_model(model: str, parallel_ctx: ParallelExecutionCtx, *model_args, **kwargs) -> Callable: revision = kwargs.pop('revision', 'main') cache_dir = kwargs.pop('cache_dir', None) local_files_only = kwargs.pop('local_files_only', False) skip_load_weights = kwargs.pop('skip_load_weights', False) parallel_config = Config() for (k, v) in dict(kwargs).items(): if k in parallel_config.__dict__: setattr(parallel_config, k, v) kwargs.pop(k) is_local = os.path.isdir(model) if not is_local: hf_folder = download_model_from_hf(model_name_or_path=model, cache_dir=cache_dir, revision=revision, local_files_only=local_files_only, skip_download_weights=skip_load_weights) else: hf_folder = model (model_config, kwargs) = AutoConfig.from_pretrained(hf_folder, revision=revision, local_files_only=True, return_unused_kwargs=True, **kwargs) model_arch = model_config.architectures model_cls = getattr(importlib.import_module('transformers'), model_arch[0]) if not skip_load_weights: parallel_ctx.weight_map = try_collect_weight_map(model, cache_dir, hf_folder) (torch_dtype, dtype_orig) = (kwargs.pop('torch_dtype', None), None) if torch_dtype is not None: dtype_orig = model_cls._set_default_torch_dtype(torch_dtype) with MetaAwareMethodsPatcher(): model = model_cls(model_config, *model_args, **kwargs) model.eval() if dtype_orig is not None: torch.set_default_dtype(dtype_orig) move_model_to_device(model, device=parallel_ctx.current_device) initialize_parameter_meta(model) backend = partial(parallelize_backend, ctx=parallel_ctx, config=parallel_config) model = torch.compile(model, fullgraph=True, backend=backend) return model # File: optimum-main/optimum/fx/parallelization/core.py from dataclasses import dataclass, field from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch.fx import GraphModule class HashableSlice: def __init__(self, start: Optional[int]=None, stop: Optional[int]=None, step: Optional[int]=None) -> None: self.start = start self.stop = stop self.step = step def __hash__(self) -> int: return hash(f'{self.start},{self.stop},{self.step}') def __eq__(self, value: object) -> bool: return isinstance(value, HashableSlice) and self.start == value.start and (self.stop == value.stop) and (self.step == value.step) def to_slice(self) -> slice: return slice(self.start, self.stop, self.step) @dataclass class ParameterSlice: source: Optional[str] = None shape: Optional[Tuple] = None index: slice = slice(None, None, None) @dataclass class ParameterMeta: is_tied: bool = False is_parallel: bool = False is_modified_meta: bool = False need_initialize: bool = False init_fn: Optional[Callable] = None dim: int = 0 mapping: Dict[HashableSlice, ParameterSlice] = field(default_factory=dict) @dataclass class ParallelExecutionCtx: tp_group: dist.ProcessGroup current_device: torch.device example_inputs: List[Any] = field(default_factory=list) parallel_layer_cache: Dict[str, nn.Module] = field(default_factory=dict) param_cache: Dict[str, nn.Parameter] = field(default_factory=dict) weight_map: Dict[str, str] = field(default_factory=dict) last_optimized_graph_module: Optional[GraphModule] = None compile_times: int = 0 @dataclass class Config: lint_and_recompile: bool = True clean_markers_after_all_passes: bool = 
True weight_init_fn: Callable = partial(nn.init.normal_, std=0.02) enable_sequence_parallel: bool = False # File: optimum-main/optimum/fx/parallelization/decomp.py import contextlib from typing import Callable, Dict, List import torch import torch.nn.functional as F import torch.utils._pytree as pytree from torch import SymBool, SymFloat, SymInt from torch._decomp import core_aten_decompositions from torch._functorch._aot_autograd.functional_utils import from_fun, to_fun from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode, disable_functional_mode from torch.fx import Graph, GraphModule, Interpreter, Proxy, traceback from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, _ProxyTensor, _SymNodeDict, decompose, disable_proxy_modes_tracing, fetch_object_proxy, fetch_sym_proxy, get_proxy_slot, track_tensor_tree from torch.fx.proxy import GraphAppendingTracer from torch.utils.weak import WeakTensorKeyDictionary def is_leaf_module(m): return (m.__module__.startswith('torch.nn') or m.__module__.startswith('torch.ao.nn')) and (not isinstance(m, torch.nn.Sequential)) @contextlib.contextmanager def trace_decomp_origin(): creat_node = Graph.create_node def create_node_(*args, **kwargs): node = creat_node(*args, **kwargs) node.meta['traced_from'] = traceback.get_current_meta()['from_node'] return node try: Graph.create_node = create_node_ yield finally: Graph.create_node = creat_node class DecompTracer(GraphAppendingTracer): def __init__(self, graph: Graph): super().__init__(graph) self.tensor_tracker = WeakTensorKeyDictionary() self.symnode_tracker = _SymNodeDict() class DecompositionInterpreter(Interpreter): def __init__(self, module: GraphModule, new_graph: Graph, decomposition_table=None, leaf_function_targets=None, **kwargs): super().__init__(module, **kwargs) self.new_graph = new_graph self.tracer = DecompTracer(new_graph) self.decomposition_table = decomposition_table if self.decomposition_table is None: self.decomposition_table = {} self.leaf_function_targets = leaf_function_targets if self.leaf_function_targets is None: self.leaf_function_targets = [] self.fun_mode = FunctionalTensorMode() self.mode = ProxyTorchDispatchMode(self.tracer, tracing_mode='real') def placeholder(self, target, args, kwargs): out = super().placeholder(target, args, kwargs) out = pytree.tree_map_only(FunctionalTensor, lambda x: from_fun(x), out) proxy = self.tracer.create_proxy('placeholder', target, args, kwargs) with disable_proxy_modes_tracing(): track_tensor_tree(out, proxy, constant=None, tracer=self.tracer) out = pytree.tree_map_only(torch.Tensor, lambda x: to_fun(x), out) return out def call_function(self, target, args, kwargs): if target in self.leaf_function_targets: args = pytree.tree_map_only(FunctionalTensor, lambda x: from_fun(x), args) kwargs = pytree.tree_map_only(FunctionalTensor, lambda x: from_fun(x), kwargs) with disable_proxy_modes_tracing(), disable_functional_mode(): out = target(*args, **kwargs) (args, kwargs) = pytree.tree_map_only((torch.Tensor,), fetch_object_proxy(self.tracer), (args, kwargs)) (proxy_args, proxy_kwargs) = pytree.tree_map_only((SymInt, SymFloat, SymBool), fetch_sym_proxy(self.tracer), pytree.tree_map_only(_ProxyTensor, lambda e: e.proxy, (args, kwargs))) proxy = self.tracer.create_proxy('call_function', target, proxy_args, proxy_kwargs) with disable_proxy_modes_tracing(): track_tensor_tree(out, proxy, constant=None, tracer=self.tracer) out = pytree.tree_map_only(torch.Tensor, lambda x: to_fun(x), out) return out return 
super().call_function(target, args, kwargs) def call_module(self, target, args, kwargs): assert isinstance(target, str) submod = self.fetch_attr(target) if not is_leaf_module(submod): return super().call_module(target, args, kwargs) args = pytree.tree_map_only(FunctionalTensor, lambda x: from_fun(x), args) kwargs = pytree.tree_map_only(FunctionalTensor, lambda x: from_fun(x), kwargs) with disable_proxy_modes_tracing(), disable_functional_mode(): out = submod(*args, **kwargs) (args, kwargs) = pytree.tree_map_only((torch.Tensor,), fetch_object_proxy(self.tracer), (args, kwargs)) (proxy_args, proxy_kwargs) = pytree.tree_map_only((SymInt, SymFloat, SymBool), fetch_sym_proxy(self.tracer), pytree.tree_map_only(_ProxyTensor, lambda e: e.proxy, (args, kwargs))) proxy = self.tracer.create_proxy('call_module', target, proxy_args, proxy_kwargs) with disable_proxy_modes_tracing(): track_tensor_tree(out, proxy, constant=None, tracer=self.tracer) out = pytree.tree_map_only(torch.Tensor, lambda x: to_fun(x), out) return out def get_attr(self, target, args, kwargs): out = super().get_attr(target, args, kwargs) proxy = Proxy(self.new_graph.get_attr(target), self.tracer) with disable_proxy_modes_tracing(): track_tensor_tree(out, proxy, constant=None, tracer=self.tracer) return out def output(self, target, args, kwargs): args = pytree.tree_map_only(FunctionalTensor, lambda x: from_fun(x), args) kwargs = pytree.tree_map_only(FunctionalTensor, lambda x: from_fun(x), kwargs) out = super().output(target, args, kwargs) def unwrap(e): return get_proxy_slot(e, self.tracer, e, lambda x: x.proxy.node) self.new_graph.output(pytree.tree_map(unwrap, out)) return out def run(self, *args, **kwargs): with self.fun_mode: args = pytree.tree_map_only(torch.Tensor, lambda x: to_fun(x), args) kwargs = pytree.tree_map_only(torch.Tensor, lambda x: to_fun(x), kwargs) with traceback.preserve_node_meta(), trace_decomp_origin(), decompose(self.decomposition_table), self.mode: return super().run(*args, **kwargs) def decompose_and_functionalize(graph_module: GraphModule, decomposition_table: Dict[torch._ops.OperatorBase, Callable]=core_aten_decompositions(), leaf_function_targets: List[Callable]=[F.scaled_dot_product_attention]) -> Callable: new_graph = Graph(owning_module=graph_module) interp = DecompositionInterpreter(graph_module, new_graph, decomposition_table, leaf_function_targets) def wrapper(*args, **kwargs): interp.run(*args, **kwargs) return new_graph return wrapper # File: optimum-main/optimum/fx/parallelization/distributed/dist_ops.py import torch import torch.distributed as dist from ..utils import ensure_divisibility def all_reduce(group: dist.ProcessGroup, tensor: torch.Tensor) -> torch.Tensor: world_size = dist.get_world_size(group) if world_size == 1: return tensor dist.all_reduce(tensor, group=group) return tensor def all_gather(group: dist.ProcessGroup, tensor: torch.Tensor, gather_dim: int=-1) -> torch.Tensor: world_size = dist.get_world_size(group) if world_size == 1: return tensor gather_dim = (gather_dim + tensor.ndim) % tensor.ndim shape = [tensor.size(dim) * world_size if dim == gather_dim else tensor.size(dim) for dim in range(tensor.ndim)] if gather_dim != 0: (shape[0], shape[gather_dim]) = (shape[gather_dim], shape[0]) tensors = torch.empty(*shape, dtype=tensor.dtype, device=tensor.device) if gather_dim != 0: tensor = tensor.transpose(0, gather_dim) tensor = tensor.contiguous() dist.all_gather_into_tensor(tensors, tensor, group=group) if gather_dim != 0: tensors = tensors.transpose(0, 
gather_dim).contiguous() return tensors def split(group: dist.ProcessGroup, tensor: torch.Tensor, split_dim: int=-1) -> torch.Tensor: world_size = dist.get_world_size(group) if world_size == 1: return tensor rank = dist.get_rank(group) size = tensor.size() ensure_divisibility(size[split_dim], world_size) tensors = torch.split(tensor, size[split_dim] // world_size, dim=split_dim) tensor = tensors[rank].contiguous() return tensor def scatter(group: dist.ProcessGroup, tensor: torch.Tensor, output_tensor: torch.Tensor, scatter_dim: int=0) -> torch.Tensor: world_size = dist.get_world_size(group) if world_size == 1: output_tensor.copy_(tensor) return tensor rank = dist.get_rank(group) if rank == 0: size = tensor.size() ensure_divisibility(size[scatter_dim], world_size) tensors = torch.split(tensor, size[scatter_dim] // world_size, dim=scatter_dim) scatter_list = [tensor.contiguous() for tensor in tensors] output_tensor.copy_(scatter_list[rank]) else: scatter_list = None dist.scatter(tensor=output_tensor, scatter_list=scatter_list, src=0, group=group) return output_tensor class DifferentiableIdentity(torch.autograd.Function): @staticmethod def forward(ctx, tensor, group: dist.ProcessGroup): ctx.group = group return tensor @staticmethod def backward(ctx, grad_output): group = ctx.group return (DifferentiableAllReduceSum.apply(grad_output, group), None) class DifferentiableAllReduceSum(torch.autograd.Function): @staticmethod def forward(ctx, tensor: torch.Tensor, group: dist.ProcessGroup) -> torch.Tensor: ctx.group = group return all_reduce(group=group, tensor=tensor) @staticmethod def backward(ctx, grad_output: torch.Tensor) -> torch.Any: return (grad_output, None) class DifferentiableScatter(torch.autograd.Function): @staticmethod def forward(ctx, tensor: torch.Tensor, group: dist.ProcessGroup, dim: int=-1) -> torch.Tensor: ctx.group = group ctx.dim = dim return split(group=group, tensor=tensor, split_dim=dim) @staticmethod def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: return (DifferentiableAllGather.apply(grad_output, group=ctx.group, dim=ctx.dim), None, None) class DifferentiableAllGather(torch.autograd.Function): @staticmethod def forward(ctx, tensor: torch.Tensor, group: dist.ProcessGroup, dim: int=-1) -> torch.Tensor: ctx.group = group ctx.dim = dim return all_gather(group=group, tensor=tensor, gather_dim=dim) @staticmethod def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: return (DifferentiableScatter.apply(grad_output, group=ctx.group, dim=ctx.dim), None, None) def differentiable_all_reduce_sum(tensor: torch.Tensor, group: dist.ProcessGroup) -> torch.Tensor: return DifferentiableAllReduceSum.apply(tensor, group) def differentiable_identity(tensor: torch.Tensor, group: dist.ProcessGroup) -> torch.Tensor: return DifferentiableIdentity.apply(tensor, group) def differentiable_all_gather(tensor: torch.Tensor, group: dist.ProcessGroup, dim=-1) -> torch.Tensor: return DifferentiableAllGather.apply(tensor, group, dim) def differentiable_scatter(tensor: torch.Tensor, group: dist.ProcessGroup, dim=-1) -> torch.Tensor: return DifferentiableScatter.apply(tensor, group, dim) # File: optimum-main/optimum/fx/parallelization/op_registry/op_handlers.py from abc import abstractmethod from typing import Any, List, Optional import torch from torch.fx import Node from ..core import Config from ..utils import is_activation, is_embedding, is_linear class Registry: def __init__(self) -> None: self.mapping = {} def register(self, op_types): def wrapper(cls): if isinstance(op_types, 
(list, tuple)): for op_type in op_types: self.mapping[op_type] = cls else: self.mapping[op_types] = cls return cls return wrapper def is_supported(self, op_type) -> bool: return op_type in self.mapping REGISTRY = Registry() class OpParallelAxisPropagateHandler: def __init__(self, node: Node, meta_key: str, config: Config) -> None: self.node = node self.meta_key = meta_key self.config = config def extract_axis(self, arg: Any) -> Optional[int]: if not isinstance(arg, Node): return None return arg.meta[self.meta_key].get('parallel_axis', None) @abstractmethod def propagate(self) -> List[int]: raise NotImplementedError @REGISTRY.register([torch.ops.aten.pow.Tensor_Scalar, torch.ops.aten.rsqrt.default, torch.ops.aten.clone.default, torch.ops.aten.bitwise_not.default, torch.ops.aten.abs.default, torch.ops.aten._to_copy.default, torch.ops.aten.acos.default, torch.ops.aten.acosh.default, torch.ops.aten.alias.default, torch.ops.aten.asin.default, torch.ops.aten.asinh.default, torch.ops.aten.atan.default, torch.ops.aten.atanh.default, torch.ops.aten.ceil.default, torch.ops.aten.clamp.default, torch.ops.aten.cos.default, torch.ops.aten.cosh.default, torch.ops.aten.erf.default, torch.ops.aten.exp.default, torch.ops.aten.trunc.default, torch.ops.aten.tanh.default, torch.ops.aten.tan.default, torch.ops.aten.add.Scalar, torch.ops.aten.sub.Scalar, torch.ops.aten.sqrt.default, torch.ops.aten.sin.default, torch.ops.aten.sinh.default, torch.ops.aten.sign.default, torch.ops.aten.sigmoid.default, torch.ops.aten.round.default, torch.ops.aten.remainder.Scalar, torch.ops.aten.relu.default, torch.ops.aten.reciprocal.default, torch.ops.aten.neg.default, torch.ops.aten.ne.Scalar, torch.ops.aten.native_dropout.default, torch.ops.aten.mul.Scalar, torch.ops.aten.logical_not.default, torch.ops.aten.lt.Scalar, torch.ops.aten.le.Scalar, torch.ops.aten.log.default, torch.ops.aten.log10.default, torch.ops.aten.log2.default, torch.ops.aten.log1p.default, torch.ops.aten.leaky_relu.default, torch.ops.aten.isnan.default, torch.ops.aten.isinf.default, torch.ops.aten.hardtanh.default, torch.ops.aten.gt.Scalar, torch.ops.aten.gelu.default, torch.ops.aten.ge.Scalar, torch.ops.aten.fmod.Scalar, torch.ops.aten.floor.default, torch.ops.aten.fill.Scalar, torch.ops.aten.div.Scalar_mode, torch.ops.aten.div.Scalar, torch.ops.aten.bitwise_and.Scalar, torch.ops.aten.bitwise_or.Scalar, torch.ops.aten.bitwise_xor.Scalar]) class UnaryOpParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: arg = self.node.all_input_nodes[0] axis = self.extract_axis(arg) return [axis] @REGISTRY.register([torch.ops.aten.atan2.default, torch.ops.aten.add.Tensor, torch.ops.aten.bitwise_and.Tensor, torch.ops.aten.bitwise_or.Tensor, torch.ops.aten.bitwise_xor.Tensor, torch.ops.aten.div.Tensor, torch.ops.aten.div.Tensor_mode, torch.ops.aten.eq.Tensor, torch.ops.aten.fmod.Tensor, torch.ops.aten.ge.Tensor, torch.ops.aten.gt.Tensor, torch.ops.aten.le.Tensor, torch.ops.aten.logical_and.default, torch.ops.aten.logical_or.default, torch.ops.aten.logical_xor.default, torch.ops.aten.lt.Tensor, torch.ops.aten.maximum.default, torch.ops.aten.minimum.default, torch.ops.aten.mul.Tensor, torch.ops.aten.ne.Tensor, torch.ops.aten.pow.Tensor_Tensor, torch.ops.aten.remainder.Tensor, torch.ops.aten.sub.Tensor]) class BinaryOpParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: input_nodes = self.node.all_input_nodes if len(input_nodes) == 1: return UnaryOpParallelAxisPropagateHandler(self.node, 
self.meta_key, self.config).propagate() assert len(input_nodes) == 2, 'binary op should have exactly two nodes as inputs' (lhs_shape, rhs_shape) = (input_nodes[0].meta['val'].shape, input_nodes[1].meta['val'].shape) lhs_axis = self.extract_axis(input_nodes[0]) rhs_axis = self.extract_axis(input_nodes[1]) (i, j) = (len(lhs_shape) - 1, len(rhs_shape) - 1) while i >= 0 and j >= 0: k = max(lhs_shape[i], rhs_shape[j]) assert k % min(lhs_shape[i], rhs_shape[j]) == 0, f'shape {lhs_shape} and {rhs_shape} are not broadcastable!' i -= 1 j -= 1 if i < 0 and lhs_axis is not None: lhs_axis += j + 1 if j < 0 and rhs_axis is not None: rhs_axis += i + 1 if lhs_axis is None: return [rhs_axis] elif rhs_axis is None: return [lhs_axis] elif lhs_axis != rhs_axis: return [] return [lhs_axis] @REGISTRY.register([torch.ops.aten.amax.default, torch.ops.aten.amin.default, torch.ops.aten.any.dim, torch.ops.aten._log_softmax.default, torch.ops.aten._softmax.default, torch.ops.aten.cumsum.default, torch.ops.aten.mean.dim, torch.ops.aten.var.dim, torch.ops.aten.sum.dim_IntList, torch.ops.aten.prod.dim_int]) class ReductionOpParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def extract_dims(self) -> List[int]: ndim = self.node.meta['val'].ndim dims = None if 'dim' in self.node.kwargs: dims = self.node.kwargs['dim'] elif len(self.node.args) > 1 and isinstance(self.node.args[1], (int, list)): dims = self.node.args[1] if isinstance(dims, int): dims = [dims] if not dims: dims = list(range(ndim)) dims = [(dim + ndim) % ndim for dim in dims] keepdim = False if 'keepdim' in self.node.kwargs: keepdim = self.node.kwargs['keepdim'] elif len(self.node.args) > 2 and isinstance(self.node.args[2], bool): keepdim = self.node.args[2] return (dims, keepdim) def propagate(self) -> List[int]: (dims, keepdim) = self.extract_dims() arg = self.node.all_input_nodes[0] axis = self.extract_axis(arg) if axis in dims: return [] if axis is None: return [None] if keepdim: return [axis] return [axis - sum([1 if dim < axis else 0 for dim in dims])] @REGISTRY.register(torch.ops.aten.view.default) class ViewLikeOpParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: arg = self.node.args[0] axis = self.extract_axis(arg) if axis is None: return [None] (shape_before, shape_after) = (arg.meta['val'].shape, self.node.meta['val'].shape) size = 1 for i in range(len(shape_before) - 1, axis - 1, -1): size *= shape_before[i] (cur, i, res) = (1, len(shape_after) - 1, []) while cur <= size and i >= 0: cur *= shape_after[i] if cur == size: res.append(i) i -= 1 return res @REGISTRY.register(torch.ops.aten.unsqueeze.default) class UnsqueezeParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: (arg, dim) = (self.node.args[0], self.node.args[1]) ndim = arg.meta['val'].ndim axis = self.extract_axis(arg) if axis is None: return [None] dim = (dim + ndim) % ndim if dim <= axis: return [axis + 1] return [axis] @REGISTRY.register([torch.ops.aten.squeeze.dim, torch.ops.aten.squeeze.dims]) class SqueezeParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: (arg, dims) = (self.node.args[0], self.node.args[1]) axis = self.extract_axis(arg) if axis is None: return [None] ndim = self.node.args[0].meta['val'].ndim if isinstance(dims, int): dims = [dims] dims = [(dim + ndim) % ndim for dim in dims] if axis in dims: return [] return [axis - sum([1 if dim < axis else 0 for dim in dims])] @REGISTRY.register(torch.ops.aten.permute.default) class 
PermuteParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: (arg, dims) = (self.node.args[0], self.node.args[1]) ndim = arg.meta['val'].ndim axis = self.extract_axis(arg) if axis is None: return [None] for (i, dim) in enumerate(dims): if (dim + ndim) % ndim == axis: return [i] return [] @REGISTRY.register(torch.ops.aten.slice.Tensor) class SliceParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: (arg, slice_dim) = (self.node.args[0], self.node.args[1]) axis = self.extract_axis(arg) if axis is None: return [None] ndim = arg.meta['val'].ndim slice_dim = (slice_dim + ndim) % ndim if slice_dim == axis: return [] return [axis] @REGISTRY.register(torch.ops.aten.expand.default) class ExpandParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: (arg, size) = (self.node.args[0], self.node.args[1]) axis = self.extract_axis(arg) if axis is None: return [None] assert len(size) >= arg.meta['val'].ndim, 'input size must be broadcastable to the target size in expand' return [axis + len(size) - arg.meta['val'].ndim] @REGISTRY.register(torch.ops.aten.cat.default) class CatParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: (nodes, cat_axis) = (self.node.all_input_nodes, self.node.args[1]) (axis, ndim) = (self.extract_axis(nodes[0]), nodes[0].meta['val'].ndim) cat_axis = (cat_axis + ndim) % ndim if cat_axis == axis: return [] for i in range(1, len(nodes)): if self.extract_axis(nodes[i]) != axis: return [] return [axis] @REGISTRY.register(torch.ops.aten.constant_pad_nd.default) class PadParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: (pad, ndim) = (self.node.args[1], self.node.args[0].meta['val'].ndim) axis = self.extract_axis(self.node.args[0]) if axis is None: return [None] if axis >= ndim - pad // 2: return [] return [axis] @REGISTRY.register(torch.ops.aten.copy.default) class CopyParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: (dst, src) = self.node.all_input_nodes axis_dst = self.extract_axis(dst) axis_src = self.extract_axis(src) if axis_dst != axis_src: return [] return [axis_dst] @REGISTRY.register(torch.nn.functional.scaled_dot_product_attention) class SpdaAttnParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: (q, k, v) = self.node.args[:3] q_axis = self.extract_axis(q) if q_axis != self.extract_axis(k) or q_axis != self.extract_axis(v) or q_axis not in {None, 1}: return [] return [q_axis] class FallbackParallelAxisPropagateHandler(OpParallelAxisPropagateHandler): def propagate(self) -> List[int]: if self.node.op in ['placeholder', 'get_attr']: return [None] elif self.node.op == 'output': for node in self.node.all_input_nodes: if self.extract_axis(node) is not None: return [] return [None] elif is_linear(self.node): input_arg = self.node.all_input_nodes[0] axis = self.extract_axis(input_arg) if axis is None: return [2, None] elif self.config.enable_sequence_parallel and axis == 1: return [2, None] elif axis == 2: return [1, None] if self.config.enable_sequence_parallel else [None] else: return [] elif is_embedding(self.node): input_arg = self.node.all_input_nodes[0] axis = self.extract_axis(input_arg) if axis is None: return [1, None] if self.config.enable_sequence_parallel else [None] else: return [] elif is_activation(self.node): return UnaryOpParallelAxisPropagateHandler(self.node, 
self.meta_key, self.config).propagate() if all((self.extract_axis(arg) is None for arg in self.node.all_input_nodes)): return [None] raise NotImplementedError(f"don't know how to propagate axis for {self.node.target}") # File: optimum-main/optimum/fx/parallelization/parallel_layers/embedding.py import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from ..core import ParallelExecutionCtx, ParameterMeta from ..distributed import differentiable_all_reduce_sum from ..utils import ensure_divisibility class VocabParallelEmbedding(nn.Module): def __init__(self, ctx: ParallelExecutionCtx, embedding: nn.Embedding): super(VocabParallelEmbedding, self).__init__() self.process_group = ctx.tp_group world_size = dist.get_world_size(self.process_group) tp_rank = dist.get_rank(self.process_group) ensure_divisibility(embedding.num_embeddings, world_size) num_embeddings = embedding.num_embeddings // world_size self.padding_idx = embedding.padding_idx self.max_norm = embedding.max_norm self.norm_type = embedding.norm_type self.scale_grad_by_freq = embedding.scale_grad_by_freq self.sparse = embedding.sparse self.vocab_start_idx = tp_rank * num_embeddings self.vocab_end_idx = (tp_rank + 1) * num_embeddings weight_meta = getattr(embedding.weight, 'meta', None) assert isinstance(weight_meta, ParameterMeta), 'should have run `initialize_parameter_meta` after moving model to current device' if weight_meta.is_modified_meta: assert weight_meta.is_tied, 'only tied parameters could already have modified meta' else: weight_meta.need_initialize = True weight_meta.is_parallel = True weight_meta.dim = 0 for (_, Slice) in weight_meta.mapping.items(): Slice.index = slice(self.vocab_start_idx, self.vocab_end_idx) weight_meta.is_modified_meta = True self.weight = embedding.weight def forward(self, input: torch.Tensor) -> torch.Tensor: input_mask = (input < self.vocab_start_idx) | (input >= self.vocab_end_idx) masked_input = input.clone() - self.vocab_start_idx masked_input[input_mask] = 0 output = F.embedding(masked_input, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse) output[input_mask, :] = 0.0 output = differentiable_all_reduce_sum(output, self.process_group) return output # File: optimum-main/optimum/fx/parallelization/parallel_layers/linear.py import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from ..core import ParallelExecutionCtx, ParameterMeta from ..distributed import differentiable_all_gather, differentiable_all_reduce_sum, differentiable_identity, differentiable_scatter from ..utils import ensure_divisibility class ColumnParallelLinear(nn.Module): def __init__(self, ctx: ParallelExecutionCtx, linear: nn.Linear, gather_output: bool=True) -> None: super(ColumnParallelLinear, self).__init__() self.process_group = ctx.tp_group world_size = dist.get_world_size(self.process_group) tp_rank = dist.get_rank(self.process_group) ensure_divisibility(linear.out_features, world_size) out_features = linear.out_features // world_size bias = linear.bias is not None weight_meta = getattr(linear.weight, 'meta', None) assert isinstance(weight_meta, ParameterMeta), 'should have run `initialize_parameter_meta` after moving model to current device' if weight_meta.is_modified_meta: assert weight_meta.is_tied, 'only tied parameters could already have modified meta' else: weight_meta.need_initialize = True weight_meta.is_parallel = True weight_meta.dim = 0 for (_, Slice) in 
weight_meta.mapping.items(): Slice.index = slice(tp_rank * out_features, (tp_rank + 1) * out_features) weight_meta.is_modified_meta = True self.weight = linear.weight self.gather_output = gather_output if bias: bias_meta = getattr(linear.bias, 'meta', None) assert isinstance(bias_meta, ParameterMeta), 'should have run `initialize_parameter_meta` after moving model to current device' if bias_meta.is_modified_meta: assert bias_meta.is_tied, 'only tied parameters could already have modified meta' else: bias_meta.need_initialize = True bias_meta.is_parallel = True bias_meta.init_fn = torch.zero_ bias_meta.dim = 0 for (_, Slice) in bias_meta.mapping.items(): Slice.index = slice(tp_rank * out_features, (tp_rank + 1) * out_features) bias_meta.is_modified_meta = True self.bias = linear.bias else: self.register_parameter('bias', None) def forward(self, input: torch.Tensor) -> torch.Tensor: input = differentiable_identity(input, self.process_group) output = F.linear(input, self.weight, self.bias) if self.gather_output: output = differentiable_all_gather(output, self.process_group) return output class RowParallelLinear(nn.Module): def __init__(self, ctx: ParallelExecutionCtx, linear: nn.Linear, input_is_parallel: bool=False) -> None: super(RowParallelLinear, self).__init__() self.process_group = ctx.tp_group world_size = dist.get_world_size(self.process_group) tp_rank = dist.get_rank(self.process_group) ensure_divisibility(linear.in_features, world_size) in_features = linear.in_features // world_size bias = linear.bias is not None weight_meta = getattr(linear.weight, 'meta', None) assert isinstance(weight_meta, ParameterMeta), 'should have run `initialize_parameter_meta` after moving model to current device' if weight_meta.is_modified_meta: assert weight_meta.is_tied, 'only tied parameters could already have modified meta' else: weight_meta.need_initialize = True weight_meta.is_parallel = True weight_meta.dim = 1 for (_, Slice) in weight_meta.mapping.items(): Slice.index = slice(tp_rank * in_features, (tp_rank + 1) * in_features) weight_meta.is_modified_meta = True self.weight = linear.weight self.input_is_parallel = input_is_parallel if bias: bias_meta = getattr(linear.bias, 'meta', None) assert isinstance(bias_meta, ParameterMeta), 'should have run `initialize_parameter_meta` after moving model to current device' if bias_meta.is_modified_meta: assert bias_meta.is_tied, 'only tied parameters could already have modified meta' else: bias_meta.need_initialize = True bias_meta.init_fn = torch.zero_ bias_meta.is_modified_meta = True self.bias = linear.bias else: self.register_parameter('bias', None) def forward(self, input: torch.Tensor) -> torch.Tensor: if not self.input_is_parallel: input = differentiable_scatter(input, self.process_group) output = F.linear(input, self.weight) output = differentiable_all_reduce_sum(output, self.process_group) if self.bias is not None: output = output + self.bias return output # File: optimum-main/optimum/fx/parallelization/passes.py from __future__ import annotations from abc import ABC, abstractmethod from typing import Any, Dict, List import torch import torch.distributed as dist import torch.nn as nn from torch.fx import Graph, GraphModule, Node from .core import Config, ParallelExecutionCtx, ParameterMeta from .decomp import decompose_and_functionalize from .distributed import scatter from .op_registry import REGISTRY, FallbackParallelAxisPropagateHandler from .parallel_layers import ColumnParallelLinear, RowParallelLinear, VocabParallelEmbedding from .utils import 
is_embedding, is_linear, is_shape_consumer, stable_topological_sort class PassBase(ABC): need_rerun_when_recompile: bool = True @classmethod def signature(cls) -> str: return cls.__name__ @abstractmethod def run(self, graph_module: GraphModule, ctx: ParallelExecutionCtx, config: Config) -> GraphModule: raise NotImplementedError def __call__(self, graph_module: GraphModule, ctx: ParallelExecutionCtx, config: Config) -> GraphModule: if not self.need_rerun_when_recompile and ctx.compile_times > 0: return graph_module graph_module = self.run(graph_module, ctx=ctx, config=config) if config.lint_and_recompile: graph_module.graph.lint() graph_module.recompile() return graph_module class AnalyzeBase(PassBase): @classmethod def meta_key(cls) -> str: return cls.signature() @classmethod def get_stored_field_info(cls, node: Node, field: Any, must_have: bool=False) -> Any: if not cls.already_executed_per_node(node): if not must_have: return None else: raise RuntimeError(f"Can't find information related with {cls.__name__} in the current node `{node}` make sure {cls.__name__} has run and marked it") info: Dict[Any, Any] = node.meta[cls.meta_key()] if field not in info: if must_have: raise KeyError(f'Invalid query field {field} for {cls.__name__}, valid fields are {list(info.keys())}') return None return info[field] @classmethod def already_executed_per_node(cls, node: Node) -> bool: return cls.meta_key() in node.meta def place_marker_per_node(self, node: Node, info: Dict[Any, Any]) -> None: if self.already_executed_per_node(node): raise RuntimeError(f'Node {node} has already been marked by the current pass, check if the current pass has already been executed in the pipeline') node.meta[self.meta_key()] = info def clear_marker_per_node(self, node: Node) -> None: key = self.meta_key() if key in node.meta: node.meta.pop(key) def clean_all(self, graph_module: GraphModule) -> None: g: Graph = graph_module.graph for node in g.nodes: self.clear_marker_per_node(node) class ParallelAxisSolverPass(AnalyzeBase): def trace_back(self, graph_module: GraphModule, decomp_graph: Graph) -> None: node_map = {node.name: node for node in graph_module.graph.nodes} for node in decomp_graph.nodes: if 'traced_from' in node.meta: (node_name, _) = node.meta['traced_from'][0] assert node_name in node_map, f'un-recognized node origin {node_name} not in graph being traced' orig_node = node_map[node_name] self.clear_marker_per_node(orig_node) self.place_marker_per_node(orig_node, {'parallel_axis': self.get_stored_field_info(node, field='parallel_axis')}) def run(self, graph_module: GraphModule, ctx: ParallelExecutionCtx, config: Config) -> GraphModule: graph: Graph = decompose_and_functionalize(graph_module)(*ctx.example_inputs) stable_topological_sort(graph) nodes = list(graph.nodes) def search(idx: int): if idx == len(nodes): return True node = nodes[idx] if node.op == 'call_function' and REGISTRY.is_supported(node.target): prop_cls = REGISTRY.mapping[node.target] else: prop_cls = FallbackParallelAxisPropagateHandler prop = prop_cls(node, self.meta_key(), config) axis_candidates = prop.propagate() for axis in axis_candidates: self.place_marker_per_node(node, {'parallel_axis': axis}) if search(idx + 1): return True self.clear_marker_per_node(node) return False if not search(0): raise RuntimeError('Failed to find a solution to automatically parallelize ops in graph in greedy way.') self.trace_back(graph_module, graph) return graph_module class ParallelLayerAnnotatePass(AnalyzeBase): def run(self, graph_module: GraphModule, ctx: 
ParallelExecutionCtx, config: Config) -> GraphModule: for node in graph_module.graph.nodes: if is_linear(node): axis_before = ParallelAxisSolverPass.get_stored_field_info(node.args[0], 'parallel_axis') axis_after = ParallelAxisSolverPass.get_stored_field_info(node, 'parallel_axis') info = {} if axis_before is None: info['axis'] = 'column' info['gather_output'] = True if axis_after is None else False elif axis_before == 1: assert config.enable_sequence_parallel, 'illegal parallel axis for sequence parallelism deactivated setting' info['axis'] = 'column' info['sequence_parallel'] = True info['gather_output'] = True if axis_after is None else False elif axis_before == 2: info['axis'] = 'row' info['input_is_parallel'] = True if axis_after == 1: assert config.enable_sequence_parallel, 'illegal parallel axis for sequence parallelism deactivated setting' info['sequence_parallel'] = True else: info['sequence_parallel'] = False self.place_marker_per_node(node, info) elif is_embedding(node): axis_before = ParallelAxisSolverPass.get_stored_field_info(node.args[0], 'parallel_axis') axis_after = ParallelAxisSolverPass.get_stored_field_info(node, 'parallel_axis') assert axis_before is None and axis_after in [1, None] info = {'axis': 'vocab'} if axis_after == 1: assert config.enable_sequence_parallel, 'illegal parallel axis for sequence parallelism deactivated setting' info['sequence_parallel'] = True else: info['sequence_parallel'] = False self.place_marker_per_node(node, info) return graph_module class ParallelLayerReplacePass(PassBase): @staticmethod def handle_linear(node: Node, ctx: ParallelExecutionCtx) -> None: graph_module = node.graph.owning_module axis = ParallelLayerAnnotatePass.get_stored_field_info(node, field='axis') if axis is None: return assert axis in {'column', 'row'} prefix_and_field = node.target.rsplit('.', maxsplit=1) if len(prefix_and_field) == 2: parent_mod = graph_module.get_submodule(prefix_and_field[0]) field = prefix_and_field[1] else: parent_mod = graph_module field = node.target mod: nn.Linear = graph_module.get_submodule(node.target) (key, layer_cache) = (node.target, ctx.parallel_layer_cache) if key in layer_cache: new_mod = layer_cache[key] else: if axis == 'column': gather_output = ParallelLayerAnnotatePass.get_stored_field_info(node, field='gather_output', must_have=True) new_mod = ColumnParallelLinear(ctx, mod, gather_output) else: input_is_parallel = ParallelLayerAnnotatePass.get_stored_field_info(node, field='input_is_parallel', must_have=True) new_mod = RowParallelLinear(ctx, mod, input_is_parallel) layer_cache[key] = new_mod setattr(parent_mod, field, new_mod) @staticmethod def handle_embedding(node: Node, ctx: ParallelExecutionCtx) -> None: graph_module = node.graph.owning_module axis = ParallelLayerAnnotatePass.get_stored_field_info(node, field='axis') if axis is None: return assert axis in {'vocab'}, 'Only support parallelization on vocab dim for now.' 
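# Resolve the embedding submodule the node targets (parent module + attribute name), then swap it for a VocabParallelEmbedding, re-using the layer cached in the parallel execution context on recompilation.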
prefix_and_field = node.target.rsplit('.', maxsplit=1) if len(prefix_and_field) == 2: parent_mod = graph_module.get_submodule(prefix_and_field[0]) field = prefix_and_field[1] else: parent_mod = graph_module field = node.target mod: nn.Embedding = graph_module.get_submodule(node.target) (key, layer_cache) = (node.target, ctx.parallel_layer_cache) if key in layer_cache: new_mod = layer_cache[key] else: assert ctx.compile_times == 0, 'illegal path for recompilation' new_mod = VocabParallelEmbedding(ctx, mod) layer_cache[key] = new_mod setattr(parent_mod, field, new_mod) @staticmethod def handle_hard_coded_axis_param(node: Node, ctx: ParallelExecutionCtx) -> None: def extract_shape_from_node(node: Node) -> List[Any]: if 'size' in node.kwargs: return list(node.kwargs['size']) elif 'shape' in node.kwargs: return list(node.kwargs['shape']) elif isinstance(node.args[1], tuple): return list(node.args[1]) else: return list(node.args[1:]) def update(node: Node, new_shape: List[Any], parallel_axis: int): if 'size' in node.kwargs: node.update_kwarg('size', tuple(new_shape)) elif 'shape' in node.kwargs: node.update_kwarg('shape', tuple(new_shape)) elif isinstance(node.args[1], tuple): node.update_arg(1, tuple(new_shape)) else: node.update_arg(parallel_axis + 1, shape[parallel_axis]) parallel_axis = ParallelAxisSolverPass.get_stored_field_info(node, field='parallel_axis') if parallel_axis is None: return shape = extract_shape_from_node(node) assert parallel_axis < len(shape) if not isinstance(shape[parallel_axis], int) or shape[parallel_axis] == -1: return world_size = ctx.tp_group.size() assert shape[parallel_axis] % world_size == 0 shape[parallel_axis] = shape[parallel_axis] // world_size update(node, shape, parallel_axis) def run(self, graph_module: GraphModule, ctx: ParallelExecutionCtx, config: Config) -> GraphModule: for node in graph_module.graph.nodes: if is_linear(node): self.handle_linear(node, ctx) elif is_embedding(node): self.handle_embedding(node, ctx) elif is_shape_consumer(node): self.handle_hard_coded_axis_param(node, ctx) return graph_module class InitializeOrLoadWeightsPass(PassBase): def run(self, graph_module: GraphModule, ctx: ParallelExecutionCtx, config: Config) -> GraphModule: world_size = dist.get_world_size(ctx.tp_group) tp_rank = dist.get_rank(ctx.tp_group) (new_parameters, tied_parameters, param_cache) = ([], {}, ctx.param_cache) for (name, param) in sorted(graph_module.named_parameters(remove_duplicate=False)): if name in param_cache: new_parameters.append((name, param_cache[name])) continue param_meta: ParameterMeta = getattr(param, 'meta') if param_meta.is_tied and id(param) in tied_parameters: new_parameters.append((name, tied_parameters[id(param)])) continue shape = [param.size(dim) // world_size if dim == param_meta.dim and param_meta.is_parallel else param.size(dim) for dim in range(param.ndim)] if not param_meta.is_parallel and param.device == ctx.current_device: new_param = param else: new_param = nn.Parameter(torch.zeros(*shape, dtype=param.dtype, device=ctx.current_device), requires_grad=param.requires_grad) for (source, target) in sorted(param_meta.mapping.items()): if target.source in ctx.weight_map: from safetensors import safe_open with safe_open(ctx.weight_map[target.source], framework='pt', device='cpu') as fp: tensor_slice = fp.get_slice(target.source) source_index = [source.to_slice() if dim == param_meta.dim else slice(None, None, None) for dim in range(param.ndim)] load_index = [target.index if dim == param_meta.dim else slice(None, None, None) for dim in 
range(param.ndim)] tensor = tensor_slice[load_index].contiguous() tensor = torch.empty_like(tensor).copy_(tensor) with torch.no_grad(): new_param.data[source_index].copy_(tensor) if param_meta.need_initialize: for (source, target) in sorted(param_meta.mapping.items()): if target.source in ctx.weight_map: continue if not param_meta.is_parallel or tp_rank == 0: weight = torch.empty(*target.shape, dtype=param.dtype, device='cpu') init_fn = param_meta.init_fn if param_meta.init_fn else config.weight_init_fn init_fn(weight) weight = weight.to(ctx.current_device) else: weight = None index = [source.to_slice() if dim == param_meta.dim else slice(None, None, None) for dim in range(param.ndim)] with torch.no_grad(): if param_meta.is_parallel: scatter(ctx.tp_group, weight, new_param.data[index], scatter_dim=param_meta.dim) else: new_param.data[index].copy_(weight) setattr(new_param, 'meta', param_meta) if id(new_param) != id(param): new_parameters.append((name, new_param)) if param_meta.is_tied: tied_parameters[id(param)] = new_param for (name, new_param) in new_parameters: prefix_and_field = name.rsplit('.', maxsplit=1) if len(prefix_and_field) == 2: parent_mod = graph_module.get_submodule(prefix_and_field[0]) field = prefix_and_field[1] else: parent_mod = graph_module field = name if name not in param_cache: param_cache[name] = new_param setattr(parent_mod, field, new_param) return graph_module def build_parallel_pass_pipeline() -> PassPipeline: return PassPipeline([ParallelAxisSolverPass(), ParallelLayerAnnotatePass(), ParallelLayerReplacePass(), InitializeOrLoadWeightsPass()]) class PassPipeline: def __init__(self, passes: List[PassBase]=[]) -> None: self._passes = passes def __iter__(self): return self._passes.__iter__() def append(self, PASS: PassBase) -> None: self._passes.append(PASS) def __call__(self, graph_module: GraphModule, ctx: ParallelExecutionCtx, config: Config) -> GraphModule: for PASS in self._passes: graph_module = PASS(graph_module=graph_module, ctx=ctx, config=config) if config.clean_markers_after_all_passes: for PASS in self._passes: if isinstance(PASS, AnalyzeBase): PASS.clean_all(graph_module) return graph_module # File: optimum-main/optimum/fx/parallelization/utils.py import fnmatch import glob import hashlib import importlib import json import os import re import tempfile from collections import defaultdict from functools import wraps from itertools import chain from pathlib import Path from typing import Callable, Dict, List, Optional, Union import filelock import torch import torch.nn as nn import torch.nn.functional as F from torch.fx import Graph, Node from tqdm.auto import tqdm from .core import HashableSlice, ParameterMeta, ParameterSlice def ensure_divisibility(numerator: int, denominator: int) -> None: if numerator % denominator != 0: raise RuntimeError(f'{numerator} is not divisible by {denominator}, check if the parallel dimension of weight parameters is divisible by parallelism level(world size of tensor parallel group)') def is_activation(node: Node) -> bool: if node.op != 'call_module': return False mod = node.graph.owning_module return getattr(mod.get_submodule(node.target), '__module__', '').startswith('torch.nn.modules.activation') def is_linear(node: Node) -> bool: if node.op != 'call_module': return False mod = node.graph.owning_module return isinstance(mod.get_submodule(node.target), nn.Linear) def is_embedding(node: Node) -> bool: if node.op != 'call_module': return False mod = node.graph.owning_module return isinstance(mod.get_submodule(node.target), 
nn.Embedding) def is_shape_consumer(node: Node) -> bool: if node.op == 'call_method': return node.target in {'view', 'reshape', 'expand', 'resize', 'resize_'} elif node.op == 'call_function': return node.target in {torch.reshape} return False def is_output(node: Node) -> bool: return node.op == 'output' def is_shape_generator(node: Node) -> bool: return node.op == 'call_method' and node.target == 'size' def stable_topological_sort(graph: Graph): def _args(n: torch.fx.Node) -> List[torch.fx.node.Argument]: args: List[torch.fx.node.Argument] = [] torch.fx.map_arg((n.args, n.kwargs), args.append) return args pending = list(reversed(graph.nodes)) ready = set() waiting = defaultdict(list) cursor = None while pending: node = pending.pop() waiting_for = [x for x in _args(node) if x not in ready] if waiting_for: waiting[waiting_for[-1]].append(node) else: ready.add(node) if cursor and cursor.next is not node: cursor.append(node) cursor = node pending.extend(reversed(waiting.pop(node, ()))) assert not waiting and len(ready) == len(graph.nodes) def meta_init(init_fn): @wraps(init_fn) def wrapper(*args, **kwargs): kwargs['device'] = kwargs.pop('device', torch.device('meta')) return init_fn(*args, **kwargs) return wrapper @wraps(nn.Linear.forward) def meta_aware_linear_forward(*args, **kwargs): self = args[0] input = args[1] if self.weight.device != torch.device('meta'): return F.linear(input, self.weight, self.bias) orig_device = input.device input = input.to('meta') meta_output = F.linear(input, self.weight, self.bias) return torch.empty_like(meta_output, device=orig_device) @wraps(nn.Embedding.forward) def meta_aware_embedding_forward(*args, **kwargs): self = args[0] input = args[1] if self.weight.device != torch.device('meta'): return F.embedding(input=input, weight=self.weight, padding_idx=self.padding_idx, max_norm=self.max_norm, norm_type=self.norm_type, scale_grad_by_freq=self.scale_grad_by_freq, sparse=self.sparse) orig_device = input.device input = input.to('meta') meta_output = F.embedding(input=input, weight=self.weight, padding_idx=self.padding_idx, max_norm=self.max_norm, norm_type=self.norm_type, scale_grad_by_freq=self.scale_grad_by_freq, sparse=self.sparse) return torch.empty_like(meta_output, device=orig_device) class MetaAwareMethodsPatcher: methods_to_patch: Dict[str, Callable] = [('torch.nn.Linear.__init__', meta_init(nn.Linear.__init__)), ('torch.nn.Embedding.__init__', meta_init(nn.Embedding.__init__)), ('torch.nn.Linear.forward', meta_aware_linear_forward), ('torch.nn.Embedding.forward', meta_aware_embedding_forward)] def __init__(self) -> None: self.patching_specs = [] for (orig, patch_fn) in self.methods_to_patch: (module_qualified_name, attribute_name) = orig.rsplit('.', maxsplit=1) try: module = importlib.import_module(module_qualified_name) except ModuleNotFoundError as e: (module_qualified_name, module_attribute_name) = module_qualified_name.rsplit('.', maxsplit=1) module = importlib.import_module(module_qualified_name) try: module = getattr(module, module_attribute_name) except AttributeError: raise e orig_fn = getattr(module, attribute_name) self.patching_specs.append([module, attribute_name, orig_fn, patch_fn, False]) def _patch(self, identifier: str): for spec in self.patching_specs: if spec[-1]: continue if identifier in spec[1]: setattr(spec[0], spec[1], spec[3]) spec[-1] = True def _unpatch(self, identifier: str): for spec in self.patching_specs: if not spec[-1]: continue if identifier in spec[1]: setattr(spec[0], spec[1], spec[2]) spec[-1] = False def 
patch_meta_init(self): self._patch('init') def patch_meta_forward(self): self._patch('forward') def unpatch_meta_init(self): self._unpatch('init') def unpatch_meta_forward(self): self._unpatch('forward') def __enter__(self): self.patch_meta_init() self.patch_meta_forward() def __exit__(self, exc_type, exc_value, traceback): self.unpatch_meta_init() def initialize_parameter_meta(model: nn.Module) -> None: parameter_ids = set() for (name, tensor) in model.named_parameters(remove_duplicate=False): key = id(tensor) if key not in parameter_ids: setattr(tensor, 'meta', ParameterMeta(dim=0, mapping={HashableSlice(None, None, None): ParameterSlice(source=name, shape=tuple(tensor.shape))})) parameter_ids.add(key) else: tensor.meta.is_tied = True @torch.no_grad def move_model_to_device(model: nn.Module, device: Union[torch.device, str]): for (name, tensor) in chain(model.named_parameters(), model.named_buffers()): if tensor.device == torch.device('meta'): continue splits = name.rsplit('.', maxsplit=1) if len(splits) == 1: parent_mod = model attr_name = splits[0] else: qualified_name = splits[0] parent_mod = model.get_submodule(qualified_name) attr_name = splits[1] new_tensor = tensor.to(device) if isinstance(tensor, nn.Parameter): new_tensor = nn.Parameter(new_tensor) setattr(parent_mod, attr_name, new_tensor) temp_dir = tempfile.gettempdir() def get_lock(model_name_or_path: str, cache_dir: Optional[str]=None): lock_dir = cache_dir or temp_dir os.makedirs(os.path.dirname(lock_dir), exist_ok=True) model_name = model_name_or_path.replace('/', '-') hash_name = hashlib.sha256(model_name.encode()).hexdigest() lock_file_name = hash_name + model_name + '.lock' lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name), mode=438) return lock class DisabledTqdm(tqdm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs, disable=True) def download_model_from_hf(model_name_or_path: str, cache_dir: Optional[str], revision: Optional[str]=None, local_files_only: bool=False, skip_download_weights: bool=False) -> str: import huggingface_hub.constants from huggingface_hub import HfFileSystem, snapshot_download from transformers.utils import CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME, WEIGHTS_INDEX_NAME allow_patterns = ['*.safetensors', '*.bin'] if not skip_download_weights and (not huggingface_hub.constants.HF_HUB_OFFLINE): fs = HfFileSystem() file_list = fs.ls(model_name_or_path, detail=False, revision=revision) for pattern in allow_patterns: matching = fnmatch.filter(file_list, pattern) if len(matching) > 0: allow_patterns = [pattern] break if skip_download_weights: allow_patterns = [CONFIG_NAME] elif allow_patterns[0] == '*.safetensors': allow_patterns = allow_patterns + [CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME] else: allow_patterns = allow_patterns + [CONFIG_NAME, WEIGHTS_INDEX_NAME] with get_lock(model_name_or_path, cache_dir): hf_folder = snapshot_download(model_name_or_path, allow_patterns=allow_patterns, cache_dir=cache_dir, revision=revision, local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE or local_files_only, tqdm_class=DisabledTqdm) return hf_folder def _original_filename_to_safetensors_filename(filename: str) -> str: from transformers.utils import SAFE_WEIGHTS_NAME (_, extension) = filename.rsplit('.', maxsplit=1) pattern = f'\\w+(-[0-9]*-of-[0-9]*)?\\.{extension}' match_ = re.match(pattern, filename) if not match_: raise ValueError(f'Could not convert {filename} to a safetensor filename.') group_1 = match_.group(1) index_out_of_total_str = group_1 if group_1 is not None else '' 
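# Carry over the shard suffix extracted above (e.g. '-00001-of-00002') when building the corresponding safetensors filename from SAFE_WEIGHTS_NAME.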
(safetensor_filename, safetensor_extension) = SAFE_WEIGHTS_NAME.rsplit('.', maxsplit=1) return f'{safetensor_filename}{index_out_of_total_str}.{safetensor_extension}' def convert_bin_to_safetensors(model_name_or_path: str, cache_dir: Optional[str], weight_files: List[str], weight_map: Dict[str, str]): from safetensors.torch import save_file with get_lock(model_name_or_path, cache_dir): for weight_file in weight_files: weight_file_path = Path(weight_file) safetensors_filename = _original_filename_to_safetensors_filename(weight_file_path.name) output_dir = cache_dir if cache_dir else weight_file_path.parent output_file_path = os.path.join(output_dir, safetensors_filename) if not os.path.isfile(output_file_path): checkpoint = torch.load(weight_file, map_location=torch.device('cpu')) data_pointers = set() for (k, v) in checkpoint.items(): if v.data_ptr() in data_pointers: v = v.detach().clone() v = v.contiguous() checkpoint[k] = v data_pointers.add(v.data_ptr()) save_file(checkpoint, output_file_path) keys = [key for (key, value) in weight_map.items() if value == weight_file] for key in keys: weight_map[key] = output_file_path def try_collect_weight_map(model_name_or_path: str, cache_dir: Optional[str], folder_path: str) -> Dict[str, str]: from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, WEIGHTS_INDEX_NAME weight_map = {} (use_safetensors, weight_patterns) = (False, ['*safetensors', '*.bin']) for pattern in weight_patterns: if len(glob.glob(os.path.join(folder_path, pattern))) > 0: use_safetensors = pattern == '*.safetensors' break index_path = os.path.join(folder_path, SAFE_WEIGHTS_INDEX_NAME if use_safetensors else WEIGHTS_INDEX_NAME) weight_files = glob.glob(os.path.join(folder_path, '*.safetensors' if use_safetensors else '*.bin')) if os.path.isfile(index_path): with open(index_path) as f: index_dict = json.load(f) weight_map = {k: os.path.join(folder_path, v) for (k, v) in index_dict['weight_map'].items()} if not use_safetensors: convert_bin_to_safetensors(model_name_or_path, cache_dir, weight_files, weight_map) if not weight_map: from safetensors import safe_open weight_files = glob.glob(os.path.join(folder_path, '*.safetensors')) for weight_file in weight_files: with safe_open(filename=weight_file, framework='pt') as f: for key in f.keys(): weight_map[key] = weight_file return weight_map # File: optimum-main/optimum/fx/quantization/functions.py from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type, Union import torch from torch.fx.node import Argument, Node, Target from torch.nn.intrinsic import _FusedModule from torch.quantization.fx.graph_module import GraphModule, ObservedGraphModule from torch.quantization.quantize_fx import Scope, ScopeContextManager from torch.quantization.quantize_fx import fuse_fx as orig_fuse_fx from torch.quantization.quantize_fx import prepare_fx as orig_prepare_fx from torch.quantization.quantize_fx import prepare_qat_fx as orig_prepare_qat_fx from transformers import PreTrainedModel from transformers.utils.fx import HFTracer, check_if_model_is_supported, get_concrete_args, symbolic_trace from ..utils import check_if_available if TYPE_CHECKING: from torch.fx import Graph class QuantizationTracer(HFTracer): specialized_concrete_args: Optional[Dict[str, Any]] = None def __init__(self, skipped_module_names: List[str], skipped_module_classes: List[Callable]): super().__init__() self.skipped_module_names = skipped_module_names self.skipped_module_classes = skipped_module_classes self.scope = Scope('', None) 
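# Record, for every node created during tracing, the module path and module type it was created under, so the downstream FX quantization passes can map nodes back to their owning modules.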
self.node_name_to_scope: Dict[str, Tuple[str, type]] = {} def is_leaf_module(self, module: torch.nn.Module, module_qualified_name: str) -> bool: return module.__module__.startswith('torch.nn') and (not isinstance(module, torch.nn.Sequential)) or module_qualified_name in self.skipped_module_names or type(module) in self.skipped_module_classes or isinstance(module, _FusedModule) or super().is_leaf_module(module, module_qualified_name) def call_module(self, module: torch.nn.Module, forward: Callable[..., Any], args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any: module_qualified_name = self.path_of_module(module) with ScopeContextManager(self.scope, module, module_qualified_name): return super().call_module(module, forward, args, kwargs) def create_node(self, kind: str, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Argument], name: Optional[str]=None, type_expr: Optional[Any]=None) -> Node: node = super().create_node(kind, target, args, kwargs, name, type_expr) self.node_name_to_scope[node.name] = (self.scope.module_path, self.scope.module_type) return node def trace(self, root: 'PreTrainedModel', concrete_args: Optional[Dict[str, Any]]=None) -> 'Graph': if concrete_args is None and self.specialized_concrete_args is not None: concrete_args = self.specialized_concrete_args return super().trace(root, concrete_args=concrete_args) def specialized_quantization_tracer_creator(concrete_args: Dict[str, Any]) -> Type: return type('QuantizationTracer', (QuantizationTracer,), {'specialized_concrete_args': concrete_args}) @check_if_available def fuse_fx(model: Union[PreTrainedModel, GraphModule], fuse_custom_config_dict: Optional[Dict[str, Any]]=None, input_names: Optional[List[str]]=None, check: bool=True) -> GraphModule: if not isinstance(model, GraphModule): if input_names is None: input_names = model.dummy_inputs.keys() input_names = list(input_names) model = symbolic_trace(model, input_names, disable_check=not check) orig_symbolic_trace = torch.fx.symbolic_trace torch.fx.symbolic_trace = lambda x: x graph_module = orig_fuse_fx(model, fuse_custom_config_dict=fuse_custom_config_dict) torch.fx.symbolic_trace = orig_symbolic_trace return graph_module @check_if_available def prepare_fx(model: Union[PreTrainedModel, GraphModule], qconfig_dict: Any, prepare_custom_config_dict: Optional[Dict[str, Any]]=None, equalization_qconfig_dict: Optional[Dict[str, Any]]=None, backend_config_dict: Optional[Dict[str, Any]]=None, input_names: Optional[List[str]]=None, check: bool=True) -> ObservedGraphModule: if check: check_if_model_is_supported(model) tracer_cls = QuantizationTracer if not isinstance(model, GraphModule): if input_names is None: input_names = model.dummy_inputs.keys() input_names = list(input_names) tracer_cls = specialized_quantization_tracer_creator(get_concrete_args(model, input_names)) orig_quantization_tracer = torch.ao.quantization.quantize_fx.QuantizationTracer torch.ao.quantization.quantize_fx.QuantizationTracer = tracer_cls graph_module = orig_prepare_fx(model, qconfig_dict, prepare_custom_config_dict=prepare_custom_config_dict, equalization_qconfig_dict=equalization_qconfig_dict, backend_config_dict=backend_config_dict) torch.ao.quantization.quantize_fx.QuantizationTracer = orig_quantization_tracer return graph_module @check_if_available def prepare_qat_fx(model: Union[PreTrainedModel, GraphModule], qconfig_dict: Any, prepare_custom_config_dict: Optional[Dict[str, Any]]=None, backend_config_dict: Optional[Dict[str, Any]]=None, input_names: Optional[List[str]]=None, check: 
bool=True) -> ObservedGraphModule: if check: check_if_model_is_supported(model) tracer_cls = QuantizationTracer if not isinstance(model, GraphModule): if input_names is None: input_names = model.dummy_inputs.keys() input_names = list(input_names) tracer_cls = specialized_quantization_tracer_creator(get_concrete_args(model, input_names)) orig_quantization_tracer = torch.ao.quantization.quantize_fx.QuantizationTracer torch.ao.quantization.quantize_fx.QuantizationTracer = tracer_cls graph_module = orig_prepare_qat_fx(model, qconfig_dict, prepare_custom_config_dict=prepare_custom_config_dict, backend_config_dict=backend_config_dict) torch.ao.quantization.quantize_fx.QuantizationTracer = orig_quantization_tracer return graph_module # File: optimum-main/optimum/fx/utils.py from functools import wraps import transformers from packaging import version _TRANSFORMERS_MIN_VERSION = version.parse('4.20.0.dev0') transformers_version = version.parse(transformers.__version__) _fx_features_available = (_TRANSFORMERS_MIN_VERSION.major, _TRANSFORMERS_MIN_VERSION.minor) <= (transformers_version.major, transformers_version.minor) def are_fx_features_available(): return _fx_features_available def check_if_available(func): @wraps(func) def wrapper(*args, **kwargs): if not are_fx_features_available(): raise ImportError(f'Found an incompatible version of transformers. Found version {transformers_version}, but only {_TRANSFORMERS_MIN_VERSION} and above are supported.') return func(*args, **kwargs) return wrapper # File: optimum-main/optimum/gptq/data.py import random from typing import Any, Dict, List, Optional import numpy as np import torch from datasets import load_dataset '' def prepare_dataset(examples: List[Dict[str, torch.LongTensor]], batch_size: int=1, pad_token_id: Optional[int]=None): new_examples = [] for example in examples: input_ids = example['input_ids'] attention_mask = example['attention_mask'] new_examples.append({'input_ids': torch.LongTensor(input_ids), 'attention_mask': torch.LongTensor(attention_mask)}) if batch_size > 1 and pad_token_id is None: raise ValueError('You need to pass a `pad_token_id` in `quantize_model` if you want to have examples with batch size > 1') new_examples = [collate_data(new_examples[start:start + batch_size], contain_labels=False, pad_token_id=pad_token_id) for start in range(0, len(new_examples), batch_size)] return new_examples def collate_data(blocks: List[Dict[str, torch.LongTensor]], contain_labels: bool=False, pad_token_id: Optional[int]=None) -> Dict[str, torch.LongTensor]: def pad_block(block, pads): return torch.cat((pads.to(block.device), block), dim=-1).long() input_ids_blocks = [block['input_ids'] for block in blocks] attention_mask_blocks = [block['attention_mask'] for block in blocks] if contain_labels: label_blocks = [block['labels'] for block in blocks] label_max_len = max([block.size(-1) for block in label_blocks]) bsz = len(blocks) inp_max_len = max([block.size(-1) for block in input_ids_blocks]) for i in range(bsz): (block_bsz, block_inp_len) = input_ids_blocks[i].shape pad_num = inp_max_len - block_inp_len if pad_num > 0: input_ids_blocks[i] = pad_block(input_ids_blocks[i], torch.ones((block_bsz, pad_num)) * pad_token_id) attention_mask_blocks[i] = pad_block(attention_mask_blocks[i], torch.zeros((block_bsz, pad_num))) if contain_labels: block_label_len = label_blocks[i].shape[-1] label_pad_num = label_max_len - block_label_len if label_pad_num > 0: label_blocks[i] = pad_block(label_blocks[i], torch.ones((block_bsz, label_pad_num)) * -100) data = 
{'input_ids': torch.cat(input_ids_blocks, dim=0).long(), 'attention_mask': torch.cat(attention_mask_blocks, dim=0).long()} if contain_labels: data['labels'] = torch.cat(label_blocks, dim=0).long() return data def get_wikitext2(tokenizer: Any, seqlen: int, nsamples: int, split: str='train'): if split == 'train': data = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train') elif split == 'validation': data = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test') text = ''.join([' \n' if s == '' else s for s in data['text'][:1000]]) enc = tokenizer(text, return_tensors='pt') dataset = [] for _ in range(nsamples): i = random.randint(0, enc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = enc.input_ids[:, i:j] attention_mask = torch.ones_like(inp) dataset.append({'input_ids': inp, 'attention_mask': attention_mask}) return dataset def get_c4(tokenizer: Any, seqlen: int, nsamples: int, split: str='train'): if split == 'train': data = load_dataset('allenai/c4', split='train', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}) elif split == 'validation': data = load_dataset('allenai/c4', split='validation', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}) dataset = [] for _ in range(nsamples): while True: i = random.randint(0, len(data) - 1) enc = tokenizer(data[i]['text'], return_tensors='pt') if enc.input_ids.shape[1] >= seqlen: break i = random.randint(0, enc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = enc.input_ids[:, i:j] attention_mask = torch.ones_like(inp) dataset.append({'input_ids': inp, 'attention_mask': attention_mask}) return dataset def get_c4_new(tokenizer: Any, seqlen: int, nsamples: int, split: str='train'): if split == 'train': data = load_dataset('allenai/c4', split='train', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}) elif split == 'validation': data = load_dataset('allenai/c4', split='validation', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}) dataset = [] for _ in range(nsamples): while True: i = random.randint(0, len(data) - 1) enc = tokenizer(data[i]['text'], return_tensors='pt') if enc.input_ids.shape[1] >= seqlen: break i = random.randint(0, enc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = enc.input_ids[:, i:j] attention_mask = torch.ones_like(inp) dataset.append({'input_ids': inp, 'attention_mask': attention_mask}) return dataset def get_ptb(tokenizer: Any, seqlen: int, nsamples: int, split: str='train'): raise RuntimeError('Loading the `ptb` dataset was deprecated') def get_ptb_new(tokenizer: Any, seqlen: int, nsamples: int, split: str='train'): raise RuntimeError('Loading the `ptb` dataset was deprecated') def get_dataset(dataset_name: str, tokenizer: Any, nsamples: int=128, seqlen: int=2048, seed: int=0, split: str='train'): random.seed(seed) np.random.seed(seed) torch.random.manual_seed(seed) get_dataset_map = {'wikitext2': get_wikitext2, 'c4': get_c4, 'c4-new': get_c4_new} if split not in ['train', 'validation']: raise ValueError(f"The split need to be 'train' or 'validation' but found {split}") if dataset_name in {'ptb', 'ptb-new'}: raise ValueError(f'{dataset_name} dataset was deprecated, only the following dataset are supported : {list(get_dataset_map)}') if dataset_name not in get_dataset_map: raise ValueError(f'Expected a value in {list(get_dataset_map.keys())} but found {dataset_name}') get_dataset_fn = get_dataset_map[dataset_name] return get_dataset_fn(tokenizer=tokenizer, nsamples=nsamples, seqlen=seqlen, split=split) # File: 
optimum-main/optimum/gptq/eval.py import torch import torch.nn as nn from datasets import load_dataset from tqdm import tqdm def evaluate_perplexity(model, tokenizer): def _perplexity(nlls, n_samples, seqlen): return torch.exp(torch.stack(nlls).sum() / (n_samples * seqlen)) data = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test') data = tokenizer('\n\n'.join(data['text']), return_tensors='pt') data = data.input_ids.to(model.device) seqlen = 512 model = model.eval() n_samples = data.numel() // seqlen nlls = [] with tqdm(range(n_samples), desc='Perplexity -') as progress_bar: for i in progress_bar: start_index = i * seqlen end_index = (i + 1) * seqlen batch = data[:, start_index:end_index].to(model.device) with torch.no_grad(): logits = model(batch).logits shift_logits = logits[:, :-1, :].contiguous().float() shift_labels = data[:, start_index:end_index][:, 1:] loss_fct = nn.CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) neg_log_likelihood = loss.float() * seqlen nlls.append(neg_log_likelihood) curr_ppl = _perplexity(nlls, i + 1, seqlen) progress_bar.set_description(f'Perplexity {curr_ppl:.3f}') ppl = _perplexity(nlls, n_samples, seqlen) return ppl.item() # File: optimum-main/optimum/gptq/quantizer.py import json import os from enum import Enum from logging import getLogger from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from tqdm.auto import tqdm from transformers import AutoTokenizer from transformers.pytorch_utils import Conv1D from transformers.utils.quantization_config import QuantizationMethod from ..utils import is_accelerate_available, is_auto_gptq_available from ..utils.modeling_utils import recurse_getattr from .constants import GPTQ_CONFIG from .data import get_dataset, prepare_dataset from .utils import get_block_name_with_pattern, get_device, get_layers, get_preceding_modules, get_seqlen if is_accelerate_available(): from accelerate import cpu_offload_with_hook, load_checkpoint_and_dispatch from accelerate.hooks import remove_hook_from_module if is_auto_gptq_available(): from auto_gptq import exllama_set_max_input_length from auto_gptq.modeling._utils import autogptq_post_init from auto_gptq.quantization import GPTQ from auto_gptq.utils.import_utils import dynamically_import_QuantLinear logger = getLogger(__name__) class ExllamaVersion(int, Enum): ONE = 1 TWO = 2 class GPTQQuantizer(object): def __init__(self, bits: int, dataset: Optional[Union[List[str], str]]=None, group_size: int=128, damp_percent: float=0.1, desc_act: bool=False, sym: bool=True, true_sequential: bool=True, use_cuda_fp16: bool=False, model_seqlen: Optional[int]=None, block_name_to_quantize: Optional[str]=None, module_name_preceding_first_block: Optional[List[str]]=None, batch_size: int=1, pad_token_id: Optional[int]=None, disable_exllama: bool=False, exllama_config: Dict[str, Any]=None, max_input_length: Optional[int]=None, cache_block_outputs: Optional[bool]=True, modules_in_block_to_quantize: Optional[List[List[str]]]=None, *args, **kwargs): self.bits = bits self.dataset = dataset self.group_size = group_size self.damp_percent = damp_percent self.desc_act = desc_act self.sym = sym self.true_sequential = true_sequential self.use_cuda_fp16 = use_cuda_fp16 self.model_seqlen = model_seqlen self.block_name_to_quantize = block_name_to_quantize self.module_name_preceding_first_block = module_name_preceding_first_block self.batch_size = batch_size self.pad_token_id = pad_token_id self.disable_exllama = 
disable_exllama self.exllama_config = exllama_config self.max_input_length = max_input_length self.quant_method = QuantizationMethod.GPTQ self.cache_block_outputs = cache_block_outputs self.modules_in_block_to_quantize = modules_in_block_to_quantize self.serialization_keys = ['bits', 'dataset', 'group_size', 'damp_percent', 'desc_act', 'sym', 'true_sequential', 'quant_method', 'modules_in_block_to_quantize'] if self.bits not in [2, 3, 4, 8]: raise ValueError('only support quantize to [2,3,4,8] bits.') if self.group_size != -1 and self.group_size <= 0: raise ValueError('group_size must be greater than 0 or equal to -1') if not 0 < self.damp_percent < 1: raise ValueError('damp_percent must between 0 and 1.') if self.exllama_config is None: self.exllama_config = {'version': ExllamaVersion.TWO} elif 'version' not in self.exllama_config: raise ValueError('`exllama_config` needs to have a `version` key') elif self.exllama_config['version'] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]: version = self.exllama_config['version'] raise ValueError(f'Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {version}') self.exllama_version = self.exllama_config['version'] def to_dict(self): gptq_dict = {} for key in self.serialization_keys: gptq_dict[key] = getattr(self, key) return gptq_dict @classmethod def from_dict(cls, config_dict: Dict[str, Any]): return cls(**config_dict) def convert_model(self, model: nn.Module): if self.block_name_to_quantize is None: self.block_name_to_quantize = get_block_name_with_pattern(model) block_name = self.block_name_to_quantize layers_to_be_replaced = get_layers(model, prefix=block_name) if self.modules_in_block_to_quantize is not None: layers_to_keep = sum(self.modules_in_block_to_quantize, []) for name in list(layers_to_be_replaced.keys()): if not any((name.endswith(layer) for layer in layers_to_keep)): logger.info(f'Quantization disabled for {name} (only modules_in_block_to_quantize={self.modules_in_block_to_quantize} are quantized)') del layers_to_be_replaced[name] self._replace_by_quant_layers(model, layers_to_be_replaced) return model def get_no_split_module_classes(self, model): block_class_name = recurse_getattr(model, self.block_name_to_quantize)[0].__class__.__name__ no_split_module_classes = [block_class_name] return no_split_module_classes def _replace_by_quant_layers(self, module: nn.Module, names: List[str], name: str=''): QuantLinear = dynamically_import_QuantLinear(use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=self.disable_exllama or self.exllama_version != ExllamaVersion.ONE, disable_exllamav2=self.disable_exllama or self.exllama_version != ExllamaVersion.TWO) if isinstance(module, QuantLinear): return for attr in dir(module): layer = getattr(module, attr) name1 = name + '.' 
+ attr if name != '' else attr if name1 in names: device = get_device(layer) delattr(module, attr) if isinstance(layer, nn.Linear): in_features = layer.in_features out_features = layer.out_features elif isinstance(layer, nn.Conv2d): in_features = layer.in_channels out_features = layer.out_channels elif isinstance(layer, Conv1D): in_features = layer.weight.shape[0] out_features = layer.weight.shape[1] bias = layer.bias is not None if not self.desc_act or self.group_size == -1: new_layer = QuantLinear(self.bits, self.group_size, in_features, out_features, bias, use_cuda_fp16=self.use_cuda_fp16, weight_dtype=layer.weight.dtype) else: new_layer = QuantLinear(self.bits, self.group_size, in_features, out_features, bias, weight_dtype=layer.weight.dtype) new_layer.device = device setattr(module, attr, new_layer.to(device)) for (name1, child) in module.named_children(): self._replace_by_quant_layers(child, names, name + '.' + name1 if name != '' else name1) @torch.no_grad() def quantize_model(self, model: nn.Module, tokenizer: Optional[Any]=None): if not is_auto_gptq_available(): raise RuntimeError('auto-gptq is required in order to perform quantzation : `pip install auto-gptq`') if not torch.cuda.is_available(): raise RuntimeError('No GPU found. A GPU is needed to quantize model.') model.eval() has_config = False has_device_map = False if hasattr(model, 'config'): has_config = True use_cache = model.config.use_cache model.config.use_cache = False if hasattr(model, 'hf_device_map'): devices = list(model.hf_device_map.values()) has_device_map = True if 'disk' in devices: raise ValueError('disk offload is not supported with GPTQ quantization') if 'cpu' in devices or torch.device('cpu') in devices: if len(model.hf_device_map) > 1: logger.info('Cpu offload is not recommended. There might be some issues with the memory') hook = None for (name, device) in model.hf_device_map.items(): if device == 'cpu': module = recurse_getattr(model, name) remove_hook_from_module(module, recurse=True) (module, hook) = cpu_offload_with_hook(module, prev_module_hook=hook) else: has_device_map = False if hasattr(model, 'dtype'): self.use_cuda_fp16 = model.dtype == torch.float16 if self.model_seqlen is None: self.model_seqlen = min(4028, get_seqlen(model)) device = get_device(model) if isinstance(self.dataset, list) and (not isinstance(self.dataset[0], str)): dataset = self.dataset logger.info('GPTQQuantizer dataset appears to be already tokenized. Skipping tokenization.') else: if isinstance(tokenizer, str): try: tokenizer = AutoTokenizer.from_pretrained(tokenizer) except Exception: raise ValueError(f'We were not able to get the tokenizer using `AutoTokenizer.from_pretrained`\n with the string that you have passed {tokenizer}. If you have a custom tokenizer, you can pass it as input.\n For now, we only support quantization for text model. Support for vision, speech and multimodel will come later.') if self.dataset is None: raise ValueError('You need to pass `dataset` in order to quantize your model') elif isinstance(self.dataset, str): dataset = get_dataset(self.dataset, tokenizer, seqlen=self.model_seqlen, split='train') elif isinstance(self.dataset, list): dataset = [tokenizer(data, return_tensors='pt') for data in self.dataset] else: raise ValueError(f'You need to pass a list of string, a list of tokenized data or a string for `dataset`. 
Found: {type(self.dataset)}.') dataset = prepare_dataset(dataset, pad_token_id=self.pad_token_id, batch_size=self.batch_size) layer_inputs = [] layer_outputs = [] layer_input_kwargs = [] if self.block_name_to_quantize is None: self.block_name_to_quantize = get_block_name_with_pattern(model) if self.module_name_preceding_first_block is None: self.module_name_preceding_first_block = get_preceding_modules(model, self.block_name_to_quantize) blocks = recurse_getattr(model, self.block_name_to_quantize) if not has_device_map: for module_name in self.module_name_preceding_first_block: module = recurse_getattr(model, module_name) if module is None: raise ValueError(f'Module {module_name} was not found in model') module = module.to(0) blocks[0] = blocks[0].to(0) def store_input_hook(_, input, *args): kwargs = args[0] if input is None: if 'hidden_states' in kwargs: input = (kwargs['hidden_states'],) else: raise ValueError('No input value found in the foward pass') layer_inputs.append(input) other_kwargs = {} for (k, v) in kwargs.items(): if k not in ['hidden_states']: other_kwargs[k] = v layer_input_kwargs.append(other_kwargs) raise ValueError if self.cache_block_outputs: handle = blocks[0].register_forward_pre_hook(store_input_hook, with_kwargs=True) for data in dataset: for (k, v) in data.items(): if not has_device_map or device.type == 'cpu': data[k] = v.to(0) else: data[k] = v.to(device) try: model(**data) except ValueError: pass handle.remove() if not has_device_map: blocks[0].to(device) for module_name in self.module_name_preceding_first_block: module = recurse_getattr(model, module_name) if module is None: raise ValueError(f'Module {module_name} was not found in model') torch.cuda.empty_cache() quantizers = {} for (i, block) in enumerate(tqdm(blocks, desc=f'Quantizing {self.block_name_to_quantize} blocks ')): logger.info(f'Start quantizing block {self.block_name_to_quantize} {i + 1}/{len(blocks)}') if not self.cache_block_outputs: handle = block.register_forward_pre_hook(store_input_hook, with_kwargs=True) for data in dataset: for (k, v) in data.items(): if not has_device_map or device.type == 'cpu': data[k] = v.to(0) else: data[k] = v.to(device) try: model(**data) except ValueError: pass handle.remove() if not has_device_map or get_device(block) == torch.device('cpu'): block = block.to(0) layers = get_layers(block) if isinstance(self.modules_in_block_to_quantize, list) and len(self.modules_in_block_to_quantize) > 0: if self.true_sequential: layers_name_list = self.modules_in_block_to_quantize else: layers_name_list = [sum(self.modules_in_block_to_quantize, [])] elif self.true_sequential: layers_name_list = [[key] for key in layers.keys()] else: layers_name_list = [list(layers.keys())] logger.info(f'Module to quantize {layers_name_list}') for subset_name_list in tqdm(layers_name_list, leave=False, desc='Quantizing layers inside the block'): subset_layers = {name: layers[name] for name in subset_name_list} gptq = {} handles = [] for name in subset_layers: gptq[name] = GPTQ(subset_layers[name]) gptq[name].quantizer.configure(bits=self.bits, sym=self.sym, perchannel=True) def add_batch(name): def tmp(_, input, output): gptq[name].add_batch(input[0].data, output.data) return tmp handles.append(subset_layers[name].register_forward_hook(add_batch(name))) for j in range(len(dataset)): block(*layer_inputs[j], **layer_input_kwargs[j]) for h in handles: h.remove() for name in subset_name_list: logger.info(f'Quantizing {name} in block {i + 1}/{len(blocks)}...') (scale, zero, g_idx) = 
gptq[name].fasterquant(percdamp=self.damp_percent, group_size=self.group_size, actorder=self.desc_act) quantizers[f'{self.block_name_to_quantize}.{i}.{name}'] = (gptq[name].quantizer, scale, zero, g_idx) gptq[name].free() del subset_layers if self.cache_block_outputs: for j in range(len(dataset)): layer_output = block(*layer_inputs[j], **layer_input_kwargs[j]) layer_outputs.append(layer_output) if not has_device_map: blocks[i] = block.to(device) del layers del layer_inputs (layer_inputs, layer_outputs) = (layer_outputs, []) else: del layers del layer_inputs layer_inputs = [] torch.cuda.empty_cache() if self.bits == 4: if device == torch.device('cpu') or (has_device_map and any((d in devices for d in ['cpu', 'disk']))): if not self.disable_exllama: logger.warning('Found modules on cpu/disk. Using Exllama/Exllamav2 backend requires all the modules to be on GPU. Setting `disable_exllama=True`') self.disable_exllama = True elif self.desc_act and (not self.disable_exllama) and (self.exllama_version == ExllamaVersion.ONE): logger.warning('Using Exllama backend with act_order will reorder the weights offline, thus you will not be able to save the model with the right weights.Setting `disable_exllama=True`. You should only use Exllama backend with act_order for inference. ') self.disable_exllama = True elif not self.disable_exllama and self.exllama_version == ExllamaVersion.TWO: logger.warning('Using Exllamav2 backend will reorder the weights offline, thus you will not be able to save the model with the right weights.Setting `disable_exllama=True`. You should only use Exllamav2 backend for inference. ') self.disable_exllama = True self.pack_model(model=model, quantizers=quantizers) model.is_quantized = True model.quantization_method = QuantizationMethod.GPTQ if has_config: model.config.use_cache = use_cache model.config.quantization_config = self.to_dict() model = self.post_init_model(model) torch.cuda.empty_cache() return model def post_init_model(self, model): if self.bits == 4 and (not self.disable_exllama): if get_device(model) == torch.device('cpu') or (hasattr(model, 'hf_device_map') and any((d in model.hf_device_map for d in ['cpu', 'disk']))): raise ValueError('Found modules on cpu/disk. 
Using Exllama or Exllamav2 backend requires all the modules to be on GPU. You can deactivate exllama backend by setting `disable_exllama=True` in the quantization config object') class StoreAttr(object): pass model.quantize_config = StoreAttr() model.quantize_config.desc_act = self.desc_act model = autogptq_post_init(model, use_act_order=self.desc_act) if self.desc_act and (not self.disable_exllama and self.exllama_version == ExllamaVersion.ONE) and (self.max_input_length is not None): model = exllama_set_max_input_length(model, self.max_input_length) return model def pack_model(self, model: nn.Module, quantizers: Dict[str, Tuple]): QuantLinear = dynamically_import_QuantLinear(use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=self.disable_exllama or self.exllama_version != ExllamaVersion.ONE, disable_exllamav2=self.disable_exllama or self.exllama_version != ExllamaVersion.TWO) logger.info('Packing model...') layers = get_layers(model) layers = {n: layers[n] for n in quantizers} self._replace_by_quant_layers(model, quantizers) qlayers = get_layers(model, [QuantLinear]) for name in qlayers: logger.info(name) (quantizers[name], scale, zero, g_idx) = quantizers[name] layer_device = qlayers[name].device qlayers[name].to('cpu') (layers[name], scale, zero, g_idx) = (layers[name].to('cpu'), scale.to('cpu'), zero.to('cpu'), g_idx.to('cpu')) qlayers[name].pack(layers[name], scale, zero, g_idx) qlayers[name].to(layer_device) logger.info('Model packed.') def save(self, model: nn.Module, save_dir: str, max_shard_size: str='10GB', safe_serialization: bool=True): os.makedirs(save_dir, exist_ok=True) model.save_pretrained(save_dir, max_shard_size=max_shard_size, safe_serialization=safe_serialization) with open(os.path.join(save_dir, GPTQ_CONFIG), 'w', encoding='utf-8') as f: json.dump(self.to_dict(), f, indent=2) def load_quantized_model(model: nn.Module, save_folder: str, quant_config_name: str=GPTQ_CONFIG, state_dict_name: Optional[str]=None, device_map: Optional[str]=None, max_memory: Optional[Dict]=None, no_split_module_classes: Optional[Dict]=None, offload_folder: Optional[str]=None, offload_buffers: Optional[str]=None, offload_state_dict: bool=False, disable_exllama: bool=False, exllama_config: Optional[Dict[str, Any]]=None, max_input_length: Optional[int]=None): if not torch.cuda.is_available(): raise RuntimeError('No GPU found. A GPU is needed to run a quantized model.') if not is_auto_gptq_available(): raise RuntimeError('auto-gptq is required in order to load quantized weights: `pip install auto-gptq`') if not is_accelerate_available(): raise RuntimeError('You need to install accelerate in order to load and dispatch weights to a quantized model. 
You can do it with `pip install accelerate`') if device_map is None: device_map = {'': torch.cuda.current_device()} logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.") if exllama_config is None: exllama_config = {'version': ExllamaVersion.TWO} elif 'version' not in exllama_config: raise ValueError('`exllama_config` needs to have a `version` key') elif exllama_config['version'] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]: version = exllama_config['version'] raise ValueError(f'Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {version}') try: if hasattr(model, 'config') and hasattr(model.config, 'quantization_config'): quantize_config_dict = model.config.quantization_config.to_dict() else: with open(os.path.join(save_folder, quant_config_name), 'r', encoding='utf-8') as f: quantize_config_dict = json.load(f) except Exception as err: raise ValueError(f"Failed to load quantization config from {save_folder} (see traceback): {err}\nTip: If the save directory was created from a transformers.PreTrainedModel, make sure that `config.json` contains a 'quantization_config' key.") from err quantizer = GPTQQuantizer.from_dict(quantize_config_dict) quantizer.disable_exllama = disable_exllama quantizer.exllama_config = exllama_config quantizer.exllama_version = quantizer.exllama_config['version'] quantizer.max_input_length = max_input_length model = quantizer.convert_model(model) if no_split_module_classes is None: no_split_module_classes = quantizer.get_no_split_module_classes(model) model = load_checkpoint_and_dispatch(model, checkpoint=os.path.join(save_folder, state_dict_name) if state_dict_name is not None else save_folder, device_map=device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, offload_folder=offload_folder, offload_buffers=offload_buffers, offload_state_dict=offload_state_dict) model = quantizer.post_init_model(model) model.is_quantized = True model.quantization_method = QuantizationMethod.GPTQ model.eval() return model # File: optimum-main/optimum/gptq/utils.py from logging import getLogger from typing import Optional, Union import torch from torch import nn from transformers.pytorch_utils import Conv1D from .constants import BLOCK_PATTERNS, SEQLEN_KEYS_TRANFORMERS logger = getLogger(__name__) '' def get_layers(module: nn.Module, layers=[Conv1D, nn.Conv2d, nn.Linear], prefix: Optional[str]=None, name: str=''): for layer in layers: if isinstance(module, layer): if prefix is not None: if name.startswith(prefix): return {name: module} else: return {name: module} res = {} for (name1, child) in module.named_children(): res.update(get_layers(child, layers=layers, prefix=prefix, name=name + '.' + name1 if name != '' else name1)) return res def get_block_name_with_pattern(model: nn.Module): modules_names = [n for (n, _) in model.named_modules()] for pattern_candidate in BLOCK_PATTERNS: pattern_candidate = pattern_candidate if any((pattern_candidate in name for name in modules_names)): return pattern_candidate raise ValueError('Block pattern could not be matched. Pass `block_name_to_quantize` argument in `quantize_model`') def get_preceding_modules(model: nn.Module, module_name: str): previous_module_name = [] stop_adding = False def _get_preceding_modules(model: nn.Module, module_name: str, name: str=''): nonlocal stop_adding for (name_bis, child) in model.named_children(): new_name = name + '.' 
+ name_bis if name != '' else name_bis if new_name == module_name: stop_adding = True break _get_preceding_modules(child, module_name, name=new_name) if not stop_adding: previous_module_name.append(name) return previous_module_name return _get_preceding_modules(model, module_name) def get_device(obj: Union[torch.Tensor, nn.Module]): if isinstance(obj, torch.Tensor): return obj.device return next(obj.parameters()).device def get_seqlen(model: nn.Module): if hasattr(model, 'config'): model_config = model.config.to_dict() if any((k in model_config for k in SEQLEN_KEYS_TRANFORMERS)): for key in SEQLEN_KEYS_TRANFORMERS: if key in model_config: return model_config[key] logger.info("We couldn't get the model sequence length. Setting it to 2048. You can overwrite this value by passing `model_seqlen` in` GPTQQuantizer`") return 2048 # File: optimum-main/optimum/modeling_base.py """""" import logging import os import subprocess import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import TYPE_CHECKING, Optional, Union from huggingface_hub import create_repo, upload_file from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from transformers import AutoConfig, PretrainedConfig, add_start_docstrings from .exporters import TasksManager from .utils import CONFIG_NAME if TYPE_CHECKING: from transformers import PreTrainedModel, TFPreTrainedModel logger = logging.getLogger(__name__) FROM_PRETRAINED_START_DOCSTRING = '\n Instantiate a pretrained model from a pre-trained model configuration.\n\n Args:\n model_id (`Union[str, Path]`):\n Can be either:\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a\n user or organization name, like `dbmdz/bert-base-german-cased`.\n - A path to a *directory* containing a model saved using [`~OptimizedModel.save_pretrained`],\n e.g., `./my_model_directory/`.\n export (`bool`, defaults to `False`):\n Defines whether the provided `model_id` needs to be exported to the targeted format.\n force_download (`bool`, defaults to `True`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n use_auth_token (`Optional[Union[bool,str]]`, defaults to `None`):\n Deprecated. Please use the `token` argument instead.\n token (`Optional[Union[bool,str]]`, defaults to `None`):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `huggingface-cli login` (stored in `huggingface_hub.constants.HF_TOKEN_PATH`).\n cache_dir (`Optional[str]`, defaults to `None`):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n subfolder (`str`, defaults to `""`):\n In case the relevant files are located inside a subfolder of the model repo either locally or on huggingface.co, you can\n specify the folder name here.\n config (`Optional[transformers.PretrainedConfig]`, defaults to `None`):\n The model configuration.\n local_files_only (`Optional[bool]`, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n trust_remote_code (`bool`, defaults to `False`):\n Whether or not to allow for custom code defined on the Hub in their own modeling. 
This option should only be set\n to `True` for repositories you trust and in which you have read the code, as it will execute code present on\n the Hub on your local machine.\n revision (`Optional[str]`, defaults to `None`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n' class PreTrainedModel(ABC): pass class OptimizedModel(PreTrainedModel): config_class = AutoConfig load_tf_weights = None base_model_prefix = 'optimized_model' config_name = CONFIG_NAME def __init__(self, model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: PretrainedConfig): super().__init__() self.model = model self.config = config self.preprocessors = [] def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) @abstractmethod def forward(self, *args, **kwargs): raise NotImplementedError def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs): if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return os.makedirs(save_directory, exist_ok=True) self._save_config(save_directory) for preprocessor in self.preprocessors: preprocessor.save_pretrained(save_directory) self._save_pretrained(save_directory) if push_to_hub: return self.push_to_hub(save_directory, **kwargs) @abstractmethod def _save_pretrained(self, save_directory): raise NotImplementedError def _save_config(self, save_directory): self.config.save_pretrained(save_directory) def push_to_hub(self, save_directory: str, repository_id: str, private: Optional[bool]=None, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None) -> str: if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token create_repo(token=token, repo_id=repository_id, exist_ok=True, private=private) for (path, subdirs, files) in os.walk(save_directory): for name in files: local_file_path = os.path.join(path, name) (_, hub_file_path) = os.path.split(local_file_path) try: upload_file(token=token, repo_id=f'{repository_id}', path_or_fileobj=os.path.join(os.getcwd(), local_file_path), path_in_repo=hub_file_path) except KeyError: pass except NameError: pass def git_config_username_and_email(self, git_user: str=None, git_email: str=None): try: if git_user is not None: subprocess.run(['git', 'config', '--global', 'user.name', git_user], stderr=subprocess.PIPE, stdout=subprocess.PIPE, check=True, encoding='utf-8') if git_email is not None: subprocess.run(['git', 'config', '--global', 'user.email', git_email], stderr=subprocess.PIPE, stdout=subprocess.PIPE, check=True, encoding='utf-8') except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) @classmethod def _load_config(cls, config_name_or_path: Union[str, os.PathLike], revision: Optional[str]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, force_download: bool=False, subfolder: str='', trust_remote_code: bool=False) -> PretrainedConfig: if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token try: config = AutoConfig.from_pretrained(pretrained_model_name_or_path=config_name_or_path, revision=revision, cache_dir=cache_dir, force_download=force_download, token=token, subfolder=subfolder, trust_remote_code=trust_remote_code) except OSError as e: if subfolder != '': config = AutoConfig.from_pretrained(pretrained_model_name_or_path=config_name_or_path, revision=revision, cache_dir=cache_dir, force_download=force_download, token=token, trust_remote_code=trust_remote_code) logger.info(f'config.json not found in the specified subfolder {subfolder}. Using the top level config.json.') else: raise OSError(e) return config @classmethod def _from_pretrained(cls, model_id: Union[str, Path], config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', local_files_only: bool=False, **kwargs) -> 'OptimizedModel': raise NotImplementedError('Overwrite this method in subclass to define how to load your model from pretrained') @classmethod def _from_transformers(cls, model_id: Union[str, Path], config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', local_files_only: bool=False, trust_remote_code: bool=False, **kwargs) -> 'OptimizedModel': raise NotImplementedError('`_from_transformers` method will be deprecated in a future release. 
Please override `_export` instead to define how to load your model from a vanilla Transformers model') @classmethod def _export(cls, model_id: Union[str, Path], config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', local_files_only: bool=False, trust_remote_code: bool=False, **kwargs) -> 'OptimizedModel': raise NotImplementedError('Overwrite this method in subclass to define how to load your model from a vanilla Hugging Face model') @classmethod @add_start_docstrings(FROM_PRETRAINED_START_DOCSTRING) def from_pretrained(cls, model_id: Union[str, Path], export: bool=False, force_download: bool=False, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', config: Optional[PretrainedConfig]=None, local_files_only: bool=False, trust_remote_code: bool=False, revision: Optional[str]=None, **kwargs) -> 'OptimizedModel': if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token if isinstance(model_id, Path): model_id = model_id.as_posix() from_transformers = kwargs.pop('from_transformers', None) if from_transformers is not None: logger.warning('The argument `from_transformers` is deprecated, and will be removed in optimum 2.0. Use `export` instead') export = from_transformers if len(model_id.split('@')) == 2: if revision is not None: logger.warning(f"The argument `revision` was set to {revision} but will be ignored for {model_id.split('@')[1]}") (model_id, revision) = model_id.split('@') library_name = TasksManager.infer_library_from_model(model_id, subfolder, revision, cache_dir, token=token) if library_name == 'timm': config = PretrainedConfig.from_pretrained(model_id, subfolder, revision) if config is None: if os.path.isdir(os.path.join(model_id, subfolder)) and cls.config_name == CONFIG_NAME: if CONFIG_NAME in os.listdir(os.path.join(model_id, subfolder)): config = AutoConfig.from_pretrained(os.path.join(model_id, subfolder), trust_remote_code=trust_remote_code) elif CONFIG_NAME in os.listdir(model_id): config = AutoConfig.from_pretrained(os.path.join(model_id, CONFIG_NAME), trust_remote_code=trust_remote_code) logger.info(f'config.json not found in the specified subfolder {subfolder}. 
Using the top level config.json.') else: raise OSError(f'config.json not found in {model_id} local folder') else: config = cls._load_config(model_id, revision=revision, cache_dir=cache_dir, token=token, force_download=force_download, subfolder=subfolder, trust_remote_code=trust_remote_code) elif isinstance(config, (str, os.PathLike)): config = cls._load_config(config, revision=revision, cache_dir=cache_dir, token=token, force_download=force_download, subfolder=subfolder, trust_remote_code=trust_remote_code) from_pretrained_method = cls._from_transformers if export else cls._from_pretrained return from_pretrained_method(model_id=model_id, config=config, revision=revision, cache_dir=cache_dir, force_download=force_download, token=token, subfolder=subfolder, local_files_only=local_files_only, trust_remote_code=trust_remote_code, **kwargs) # File: optimum-main/optimum/onnx/__init__.py from typing import TYPE_CHECKING from transformers.utils import _LazyModule _import_structure = {'graph_transformations': ['cast_slice_nodes_inputs_to_int32', 'merge_decoders', 'remove_duplicate_weights', 'replace_atenops_to_gather', 'remove_duplicate_weights_from_tied_info']} if TYPE_CHECKING: from .graph_transformations import cast_slice_nodes_inputs_to_int32, merge_decoders, remove_duplicate_weights, remove_duplicate_weights_from_tied_info, replace_atenops_to_gather else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: optimum-main/optimum/onnx/configuration.py from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Optional from transformers.file_utils import TensorType from transformers.utils import logging if TYPE_CHECKING: from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast logger = logging.get_logger(__name__) class EncoderOnnxConfig(OnnxConfig): @property def inputs(self) -> Dict[str, Dict[int, str]]: return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})]) @property def outputs(self) -> Dict[str, Dict[int, str]]: return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'sequence'}}) class DecoderOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Dict[str, Dict[int, str]]: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'past_decoder_sequence + sequence'}), ('encoder_hidden_states', {0: 'batch', 1: 'encoder_sequence'}), ('encoder_attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') return common_inputs def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizerBase', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Dict[str, Any]: import torch common_inputs = {} dummy_input = super().generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) (batch, encoder_seq_length) = dummy_input['input_ids'].shape encoder_hidden_states_shape = (batch, encoder_seq_length, self._config.hidden_size) common_inputs['input_ids'] = dummy_input.pop('decoder_input_ids') common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape) common_inputs['encoder_attention_mask'] = dummy_input.pop('attention_mask') if 'past_key_values' in dummy_input: common_inputs['past_key_values'] = dummy_input.pop('past_key_values') 
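# Annotation (not part of the original file): at this point `common_inputs` holds the decoder `input_ids`
# (renamed from the dummy `decoder_input_ids`), a zero-filled `encoder_hidden_states` tensor of shape
# (batch, encoder_sequence_length, hidden_size), the `encoder_attention_mask`, and, when the parent config
# produced them, the `past_key_values`.
# Hedged usage sketch (illustrative assumptions only; the checkpoint name and constructor kwargs are not
# taken from this file):
#   onnx_config = DecoderOnnxConfig(model.config, task="seq2seq-lm", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
#   # dummy maps 'input_ids', 'encoder_hidden_states', 'encoder_attention_mask' (and optional past key values) to tensors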
return common_inputs @property def outputs(self) -> Dict[str, Dict[int, str]]: common_outputs = super(OnnxConfigWithPast, self).outputs self.fill_with_past_key_values_(common_outputs, direction='outputs') return common_outputs def fill_with_past_key_values_(self, inputs_or_outputs: Dict[str, Dict[int, str]], direction: str): num_pkv_per_layer = 4 (_, num_decoder_layers) = self.num_layers name = 'past' if direction == 'inputs' else 'present' decoder_sequence = 'past_decoder_sequence' if direction == 'inputs' else 'past_decoder_sequence + sequence' for i in range(num_decoder_layers * num_pkv_per_layer): inputs_or_outputs[f'{name}_key_values_{i}'] = {0: 'batch', 2: decoder_sequence} # File: optimum-main/optimum/onnx/graph_transformations.py import copy import os from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union import onnx from onnx import ModelProto from ..utils import logging from .transformations_utils import _create_name_sharing_dict, _deduplicate_gather_matmul, _deduplicated_cross_model_initializers, _find_duplicate_initializers, _find_matching_initializers, _get_all_inputs, _get_onnx_opset, _get_weights_to_tie, _remove_redundant_initializers, _replace_input_names, _unify_onnx_outputs, cast_int64_tensorproto_to_int32 if TYPE_CHECKING: import torch.nn as nn logger = logging.get_logger() def remove_duplicate_weights(model: ModelProto, inplace: bool=False) -> ModelProto: if not inplace: model = copy.deepcopy(model) duplicates = _find_duplicate_initializers(models=[model]) name_sharing_dict = _create_name_sharing_dict(duplicates) _replace_input_names(models=[model], name_sharing_dict=name_sharing_dict) _remove_redundant_initializers(models=[model], name_sharing_dict=name_sharing_dict) return model def remove_duplicate_weights_from_tied_info(onnx_model: ModelProto, torch_model: 'nn.Module', tied_params: List[List[str]], save_path: str): (tied_params_with_op, tied_groups_to_tie, tied_groups_ignored) = _get_weights_to_tie(tied_params, torch_model) if len(tied_groups_ignored) >= 1: logger.info(f'The groups of weights {tied_groups_ignored} will not be tied as either already tied or tying is not implemented.') initializer_name_to_idx = {} for (idx, initializer) in enumerate(onnx_model.graph.initializer): initializer_name_to_idx[initializer.name] = idx tied_groups_map = _find_matching_initializers(tied_params_with_op, onnx_model, initializer_name_to_idx) onnx_model = _deduplicate_gather_matmul(onnx_model, tied_groups_to_tie, tied_groups_map, initializer_name_to_idx) check_and_save_model(onnx_model, save_path=save_path) return onnx_model def replace_atenops_to_gather(model: ModelProto) -> ModelProto: nodes = model.graph.node for node in nodes: if node.op_type in ['ATenOp', 'ATen']: op_num = node.name.split('_')[-1] new_node = onnx.helper.make_node('Gather', name='Gather_' + op_num, inputs=[node.input[0], node.input[1]], outputs=node.output) model.graph.node.remove(node) model.graph.node.insert(int(op_num), new_node) onnx.checker.check_model(model) return model def check_and_save_model(model: onnx.ModelProto, save_path: Optional[Union[str, Path]]): if model.ByteSize() < onnx.checker.MAXIMUM_PROTOBUF: try: onnx.checker.check_model(model) except Exception as e: if 'No Op registered for' in str(e): pass else: raise e if save_path: save_path = Path(save_path).as_posix() external_file_name = os.path.basename(save_path) + '_data' external_path = os.path.join(os.path.dirname(save_path), external_file_name) if save_path.endswith('.onnx') and os.path.isfile(save_path): 
os.remove(save_path) if os.path.isfile(external_path): os.remove(external_path) onnx.save(model, save_path, convert_attribute=True) elif save_path is not None: save_path = Path(save_path).as_posix() external_file_name = os.path.basename(save_path) + '_data' external_path = os.path.join(os.path.dirname(save_path), external_file_name) if save_path.endswith('.onnx') and os.path.isfile(save_path): os.remove(save_path) if os.path.isfile(external_path): os.remove(external_path) onnx.save(model, save_path, save_as_external_data=True, all_tensors_to_one_file=True, location=external_file_name, convert_attribute=True) try: onnx.checker.check_model(save_path) except Exception as e: if 'No Op registered for' in str(e): pass else: raise e else: logger.info('Merged ONNX model exceeds 2GB, the model will not be checked without `save_path` given.') def merge_decoders(decoder: Union[ModelProto, Path, str], decoder_with_past: Union[ModelProto, Path, str], graph_name: str='merged', producer_name: str='optimum-onnx', save_path: Optional[Union[str, Path]]=None, strict: bool=True) -> ModelProto: if isinstance(decoder, (str, Path)): decoder = Path(decoder).as_posix() decoder = onnx.load(decoder) if isinstance(decoder_with_past, (str, Path)): decoder_with_past = Path(decoder_with_past).as_posix() decoder_with_past = onnx.load(decoder_with_past) decoder_opset = _get_onnx_opset(decoder) decoder_with_past_opset = _get_onnx_opset(decoder_with_past) if decoder_opset != decoder_with_past_opset: raise ValueError(f"Decoder's opset is {decoder_opset}, but decoder with past's opset is {decoder_with_past_opset}. Make sure having the same opset before merging.") _unify_onnx_outputs(decoder, decoder_with_past, strict=strict) all_inputs = _get_all_inputs([decoder, decoder_with_past]) for (_, inp) in enumerate(all_inputs): if inp.name == 'attention_mask': if inp.type.tensor_type.shape.dim[1].dim_param != 'sequence_length': raise ValueError('Expected attention_mask second axis to be dynamic and named `sequence_length`.') inp.type.tensor_type.shape.dim[1].dim_param = 'attention_mask_sequence_length' deduplicated_initializers = _deduplicated_cross_model_initializers([decoder, decoder_with_past], suffix=graph_name) decoder_initializers = [] for initializer in decoder.graph.initializer: if len(initializer.dims) == 0 or (len(initializer.dims) == 1 and initializer.data_type in [6, 7]): decoder_initializers.append(initializer) decoder_with_past_initializers = [] for initializer in decoder_with_past.graph.initializer: if len(initializer.dims) == 0 or (len(initializer.dims) == 1 and initializer.data_type in [6, 7]): decoder_with_past_initializers.append(initializer) no_past_branch = onnx.helper.make_graph(nodes=decoder.graph.node, name='no_past', inputs=[], outputs=decoder.graph.output, initializer=decoder_initializers) with_past_branch = onnx.helper.make_graph(nodes=decoder_with_past.graph.node, name='with_past', inputs=[], outputs=decoder_with_past.graph.output, initializer=decoder_with_past_initializers) use_cache_branch = onnx.helper.make_tensor_value_info(name='use_cache_branch', elem_type=onnx.TensorProto.BOOL, shape=[1]) if_node = onnx.helper.make_node('If', inputs=['use_cache_branch'], outputs=[output.name for output in no_past_branch.output], name='optimum::if', then_branch=with_past_branch, else_branch=no_past_branch) merged_graph = onnx.helper.make_graph(nodes=[if_node], name=graph_name, inputs=all_inputs + [use_cache_branch], outputs=no_past_branch.output, initializer=deduplicated_initializers) opset_imports = [] 
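# Annotation (not part of the original file): the graph assembled above wraps the two decoder graphs as the
# else/then branches of a single ONNX `If` node gated by the boolean `use_cache_branch` input, so one merged
# model serves both the no-past and with-past paths. The loop that follows collects the union of opset imports
# from both source models, keeping a single entry per domain, before building the merged ModelProto and passing
# it to check_and_save_model.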
opset_domains = set() for opset_import in list(decoder.opset_import) + list(decoder_with_past.opset_import): if opset_import.domain not in opset_domains: opset_imports.append(opset_import) opset_domains.add(opset_import.domain) merged_model = onnx.helper.make_model_gen_version(merged_graph, producer_name=producer_name, opset_imports=opset_imports, ir_version=9) check_and_save_model(merged_model, save_path=save_path) return merged_model def cast_slice_nodes_inputs_to_int32(model: ModelProto) -> ModelProto: map_input_node = {} map_node_inputs = {} for node in model.graph.node: for input_name in node.input: map_input_node[input_name] = {'op_type': node.op_type, 'node_name': node.name} map_node_inputs[node.name] = node.input for node in model.graph.node: if node.op_type == 'Constant' and node.attribute[0].t.data_type == 7 and (f'{node.name}_output_0' in map_input_node) and (map_input_node[node.name + '_output_0']['op_type'] == 'Slice'): logger.debug(f'Converting {node.name} to int32') cast = all(('Constant' in inp for inp in map_node_inputs[map_input_node[node.name + '_output_0']['node_name']][1:])) cast_int64_tensorproto_to_int32(node.attribute[0].t, cast=cast) return model # File: optimum-main/optimum/onnx/modeling_seq2seq.py from typing import Optional, Tuple import torch from torch.nn import CrossEntropyLoss from transformers import PreTrainedModel from transformers.file_utils import add_start_docstrings_to_model_forward DECODER_WITH_LM_HEAD_INPUTS_DOCSTRING = '\n Arguments:\n input_ids (`torch.LongTensor`):\n Indices of decoder input sequence tokens in the vocabulary of shape `(batch_size, decoder_sequence_length)`.\n encoder_hidden_states (`torch.FloatTensor`):\n The encoder `last_hidden_state` of shape `(batch_size, encoder_sequence_length, hidden_size)`.\n attention_mask (`torch.LongTensor`, *optional*):\n Mask to avoid performing attention on padding token indices of `input_ids`.\n encoder_attention_mask (`torch.LongTensor`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder `input_ids`.\n past_key_values (`tuple(tuple(torch.FloatTensor), *optional*)`\n Contains the precomputed key and value hidden states of the attention blocks used to speed up decoding.\n The tuple is of length `config.n_layers` with each tuple having 2 tensors of shape\n `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)` and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n' class _DecoderWithLMhead(PreTrainedModel): def __init__(self, model: PreTrainedModel): super().__init__(model.config) self.config = model.config self.decoder = model.get_decoder() self.lm_head = model.get_output_embeddings() self.final_logits_bias = getattr(model, 'final_logits_bias', None) @add_start_docstrings_to_model_forward(DECODER_WITH_LM_HEAD_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor, encoder_hidden_states: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, labels: Optional[torch.LongTensor]=None): decoder_outputs = self.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, return_dict=True, use_cache=True) last_hidden_state = decoder_outputs.last_hidden_state if self.config.model_type == 't5' and self.config.tie_word_embeddings: 
last_hidden_state = last_hidden_state * self.config.d_model ** (-0.5) lm_logits = self.lm_head(last_hidden_state) if self.final_logits_bias is not None: lm_logits += self.final_logits_bias if labels is None: return (lm_logits, decoder_outputs.past_key_values) else: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) return (loss, lm_logits, decoder_outputs.past_key_values) # File: optimum-main/optimum/onnx/transformations_utils.py import hashlib from collections import defaultdict from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Set, Tuple import numpy as np import onnx from onnx import ModelProto, ValueInfoProto, numpy_helper if TYPE_CHECKING: import torch.nn as nn from ..utils import logging, recurse_getattr logger = logging.get_logger() logger.setLevel(logging.INFO) def _find_duplicate_initializers(models: List[ModelProto]) -> DefaultDict[Tuple[int, str, Tuple], Set[Tuple[str, int]]]: duplicates = defaultdict(set) for i in range(len(models)): for initializer in models[i].graph.initializer: tensor_dims = tuple(getattr(initializer, 'dims')) if len(tensor_dims) > 1 or (len(tensor_dims) == 1 and initializer.data_type not in [6, 7]): tensor_data = numpy_helper.to_array(initializer) hashed = hashlib.sha512() hashed.update(tensor_data) tensor_digest = hashed.hexdigest() duplicates[initializer.data_type, tensor_digest, tensor_dims].add((initializer.name, i)) return duplicates def _create_name_sharing_dict(duplicate_weights: DefaultDict[Tuple[int, str, Tuple], Set[Tuple[str, int]]], suffix: str='') -> Dict[Tuple[str, int], str]: name_sharing_dict = {} used_common_names = {} for duplicates in duplicate_weights.values(): (common_name, model_id) = duplicates.pop() if common_name in used_common_names: used_common_names[common_name] += 1 else: used_common_names[common_name] = 0 duplicates.add((common_name, model_id)) for k in duplicates: assert k not in name_sharing_dict name_sharing_dict[k] = f'{common_name}_{suffix}_{used_common_names[common_name]}' if suffix != '' else f'{common_name}' return name_sharing_dict def _replace_input_names(models: List[ModelProto], name_sharing_dict: Dict[Tuple[str, int], str]): for i in range(len(models)): for node in models[i].graph.node: for j in range(len(node.input)): if (node.input[j], i) in name_sharing_dict: node.input[j] = name_sharing_dict[node.input[j], i] def _remove_redundant_initializers(models: List[ModelProto], name_sharing_dict: Dict[Tuple[str, int], str]): to_pop = [] for i in range(len(models)): for (idx, initializer) in enumerate(models[i].graph.initializer): if initializer.name != name_sharing_dict[initializer.name, i]: to_pop.append(idx) for idx in sorted(to_pop, reverse=True): models[i].graph.initializer.pop(idx) def _infer_output_shape(output: ValueInfoProto): output_shape = [] for dim in output.type.tensor_type.shape.dim: if getattr(dim, 'dim_param'): output_shape.append(getattr(dim, 'dim_param')) elif getattr(dim, 'dim_value'): output_shape.append(getattr(dim, 'dim_value')) else: raise ValueError('Cannot find `dim_param` nor `dim_value` in the output dimension info.') return output_shape def _unify_onnx_outputs(model1: ModelProto, model2: ModelProto, strict: bool): model1_outputs = {output.name for output in model1.graph.output} model2_outputs = {output.name for output in model2.graph.output} if model1_outputs != model2_outputs: if strict is True: raise ValueError(f'The two model protos outputs are expected to have the same number of outputs and output names 
when strict=True. Found the outputs {model1_outputs - model2_outputs} only in model1, and {model2_outputs - model1_outputs} only in model2.') else: logger.info(f'The two models proto have different outputs ({len(model1_outputs)} and {len(model2_outputs)} outputs). Constant outputs will be added to unify the two models outputs. This is expected for encoder-decoder models where cached cross-attention key/values are constant outputs, omitted in the model with KV cache.') if model2_outputs.issubset(model1_outputs) is False: raise ValueError('The second ModelProto should not have more outputs than the first.') for idx in range(len(model1.graph.output)): model_output_1 = model1.graph.output[idx] model_output_2 = model2.graph.output[idx] if idx < len(model2.graph.output) else None if model_output_2 is None or model_output_1 != model_output_2: if model_output_2 is None or not (model_output_1.name == model_output_2.name and model_output_1.type.tensor_type.elem_type == model_output_2.type.tensor_type.elem_type): if strict is False and model_output_1.name not in model2_outputs: data_type = model_output_1.type.tensor_type.elem_type dims_output_1 = _infer_output_shape(model_output_1) if not any((isinstance(dim_output, str) for dim_output in dims_output_1)): raise ValueError(f'Expected at least one dynamic input shape for the output {model_output_1.name}, found a static shape: {dims_output_1}') dims_dummy_output = [] dummy_axis = None for (j, dim) in enumerate(dims_output_1): if isinstance(dim, str) and dummy_axis is None: dims_dummy_output.append(0) dummy_axis = j elif isinstance(dim, str) and dummy_axis is not None: dims_dummy_output.append(1) else: dims_dummy_output.append(dim) logger.info(f'Adding a constant output for {model_output_1.name} of shape {dims_dummy_output} in model2.') value = onnx.helper.make_tensor(name='const_tensor', data_type=data_type, dims=dims_dummy_output, vals=[]) constant_node = onnx.helper.make_node('Constant', name=f'Constant_{len(model2.graph.node) + 1}', inputs=[], outputs=[f'{model_output_1.name}'], value=value) model2.graph.node.append(constant_node) constant_empty_output = onnx.helper.make_tensor_value_info(model_output_1.name, model_output_1.type.tensor_type.elem_type, _infer_output_shape(model_output_1)) model2.graph.output.insert(idx, constant_empty_output) elif model_output_2 is not None: raise ValueError(f'Cannot match {model_output_1.name} with {model_output_2.name}. Make sure your model protos have same outputs, have same data types and are in the same order.') else: raise ValueError(f'Too few outputs of model2 were found to match with {model_output_1.name}. 
Please try to pass strict=False, or file a bug report at https://github.com/huggingface/optimum.') else: model2.graph.output.remove(model_output_2) new_output = onnx.helper.make_tensor_value_info(model_output_1.name, model_output_1.type.tensor_type.elem_type, _infer_output_shape(model_output_1)) model2.graph.output.insert(idx, new_output) if not all((model_output_1 == model_output_2 for (model_output_1, model_output_2) in zip(model1.graph.output, model2.graph.output))): raise RuntimeError('Failed to unify outputs of given ONNX model protos.') def _get_all_inputs(model_list: List[ModelProto]) -> List[onnx.onnx_ml_pb2.ValueInfoProto]: inputs = [] input_names = set() for model in model_list: for input in model.graph.input: if input.name not in input_names: input_names.add(input.name) inputs.append(input) return inputs def _get_onnx_opset(model: ModelProto): opset_import = model.opset_import[0] return getattr(opset_import, 'version') def _deduplicated_cross_model_initializers(models: List[ModelProto], suffix: str=None): duplicates = _find_duplicate_initializers(models) name_sharing_dict = _create_name_sharing_dict(duplicates, suffix=suffix) _replace_input_names(models, name_sharing_dict) deduplicated_initializers = [] deduplicated_name = set() for i in range(len(models)): for initializer in models[i].graph.initializer: name_id_pair = (initializer.name, i) if name_id_pair in name_sharing_dict and name_sharing_dict[name_id_pair] not in deduplicated_name: deduplicated_name.add(name_sharing_dict[name_id_pair]) initializer.name = name_sharing_dict[name_id_pair] deduplicated_initializers.append(initializer) return deduplicated_initializers def cast_int64_tensorproto_to_int32(initializer: onnx.TensorProto, cast: bool=False): original_name = initializer.name array = np.copy(numpy_helper.to_array(initializer)) if not array.dtype == np.int64: raise TypeError(f'Expecting a `TensorProto` of type `int64` (represented as `7` in onnx.TensorProto) in the function cast_int64_tensorproto_to_int32, but got {array.dtype}.') array[array > np.iinfo(np.int32).max] = np.iinfo(np.int32).max array[array < np.iinfo(np.int32).min] = np.iinfo(np.int32).min if cast: array = array.astype(np.int32) array.setflags(write=0) tensor = numpy_helper.from_array(array) initializer.CopyFrom(tensor) initializer.name = original_name def _get_weights_to_tie(tied_params: List[List[str]], torch_model: 'nn.Module') -> Tuple[List[List[str]]]: SUPPORTED_DEDUPLICATION_OPS = ('Embedding', 'Linear') tied_params_with_op = [] tied_groups_to_tie = [] tied_groups_ignored = [] for params in tied_params: tied_params_with_op.append({}) skip_group = False for param_name in params: module_name = '.'.join(param_name.split('.')[:-1]) module = recurse_getattr(torch_model, module_name) if module.__class__.__name__ not in SUPPORTED_DEDUPLICATION_OPS: skip_group = True tied_params_with_op[-1][param_name] = module.__class__.__name__ if skip_group: tied_groups_ignored.append(params) else: tied_groups_to_tie.append(params) return (tied_params_with_op, tied_groups_to_tie, tied_groups_ignored) def _find_matching_initializers(tied_params_with_op: List[Dict[str, str]], model: ModelProto, initializer_name_to_idx: Dict[str, int]): tied_groups_map = {} for params in tied_params_with_op: torch_to_initializer = [] for (param_name, torch_op_name) in params.items(): identical_initializer = False if param_name in initializer_name_to_idx.keys(): nodes_containing_initializer = set() for node in model.graph.node: if param_name in node.input: 
nodes_containing_initializer.add(node.name) torch_to_initializer.append({'param_name': param_name, 'initializer_name': {param_name}, 'nodes_containing_initializer': nodes_containing_initializer}) identical_initializer = True if not identical_initializer: module_name = '/'.join(param_name.split('.')[:-1]) if param_name.endswith('weight') and torch_op_name == 'Linear': module_name += '/MatMul' elif param_name.endswith('bias') and torch_op_name == 'Linear': module_name += '/Add' candidate_inputs = {} candidate_node_idxs = [] for (i, node) in enumerate(model.graph.node): if module_name in node.name: candidate_node_idxs.append(i) for node_idx in candidate_node_idxs: node_name = model.graph.node[node_idx].name candidate_inputs[node_name] = list(model.graph.node[node_idx].input) torch_to_initializer_param = set() nodes_containing_initializer = set() for (node_name, input_names) in candidate_inputs.items(): for input_name in input_names: if input_name in initializer_name_to_idx.keys(): torch_to_initializer_param.add(input_name) nodes_containing_initializer.add(node_name) if len(torch_to_initializer_param) == 0: logger.warning(f'Could not find ONNX initializer for torch parameter {param_name}. {param_name} will not be checked for deduplication.') torch_to_initializer.append({'param_name': param_name, 'initializer_name': torch_to_initializer_param, 'nodes_containing_initializer': nodes_containing_initializer}) intersect = torch_to_initializer[0]['initializer_name'] for i in range(1, len(params)): intersect = intersect.intersection(torch_to_initializer[i]['initializer_name']) if len(intersect) == 0: logger.warning('Found different candidate ONNX initializers (likely duplicate) for the tied weights:') not_found = [] for (i, torch_to_onnx_map) in enumerate(torch_to_initializer): warn_string = f"\t{torch_to_onnx_map['param_name']}: {torch_to_onnx_map['initializer_name']}" if len(torch_to_onnx_map['initializer_name']) == 0: not_found.append(i) warn_string += ' --> ignored (may be a parameter from a part of the model not exported)' logger.warning(warn_string) for index in not_found[::-1]: del torch_to_initializer[index] if any((len(torch_to_onnx_map['initializer_name']) > 1 for torch_to_onnx_map in torch_to_initializer)): logger.warning(f'Could not find unique initializers corresponding to the torch tied parameters {params}. Deduplication will be skipped for this group of weights although it should be done. Please open an issue in Optimum repository.') continue tied_groups_map[tuple(params)] = torch_to_initializer return tied_groups_map def _deduplicate_gather_matmul(model: ModelProto, tied_groups_to_tie: List[List[str]], tied_groups_map: Dict[Tuple[str], List[Dict[str, Any]]], initializer_name_to_idx: Dict[str, int]): node_name_to_idx = {} for (idx, node) in enumerate(model.graph.node): node_name_to_idx[node.name] = idx for params in tied_groups_to_tie: torch_to_initializer = tied_groups_map[tuple(params)] ref_idx = None for i in range(len(torch_to_initializer)): ops_using_initializer = set() for node_name in torch_to_initializer[i]['nodes_containing_initializer']: ops_using_initializer.add(model.graph.node[node_name_to_idx[node_name]].op_type) if ops_using_initializer == {'Gather'}: ref_idx = i break if ref_idx is None: logger.warning(f'Could not deduplicate initializers corresponding to the torch tied parameters {params} as an initializer used only by Gather nodes could not be found. 
Skipping deduplication.') continue ref_initializer_name = next(iter(torch_to_initializer[ref_idx]['initializer_name'])) ref_initializer_idx = initializer_name_to_idx[ref_initializer_name] ref_initializer = model.graph.initializer[ref_initializer_idx] ref_type = ref_initializer.data_type ref_data = numpy_helper.to_array(ref_initializer) for i in range(len(torch_to_initializer)): if i == ref_idx: continue initializer_name = next(iter(torch_to_initializer[i]['initializer_name'])) initializer_idx = initializer_name_to_idx[initializer_name] initializer = model.graph.initializer[initializer_idx] initializer_type = initializer.data_type initializer_data = numpy_helper.to_array(initializer) if initializer_name == ref_initializer_name: continue if ref_type == initializer_type and np.array_equal(ref_data, initializer_data): logger.info(f'Removing duplicate initializer {initializer_name}...') for node in model.graph.node: if initializer_name in node.input: input_idx = list(node.input).index(initializer_name) node.input[input_idx] = ref_initializer_name model.graph.initializer.pop(initializer_idx) elif ref_type == initializer_type and np.array_equal(ref_data.T, initializer_data): logger.info(f'Removing duplicate initializer {initializer_name}...') transpose_output_name = f'{ref_initializer_name}_transposed' transpose_node_name = f'Transpose_{len(model.graph.node) + 1}' minimum_node_idx = len(model.graph.node) for (node_idx, node) in enumerate(model.graph.node): if initializer_name in node.input: minimum_node_idx = node_idx break transpose_node = onnx.helper.make_node('Transpose', name=transpose_node_name, inputs=[ref_initializer_name], outputs=[transpose_output_name]) model.graph.node.insert(minimum_node_idx, transpose_node) for node in model.graph.node: if initializer_name in node.input: input_idx = list(node.input).index(initializer_name) node.input[input_idx] = transpose_output_name model.graph.initializer.pop(initializer_idx) else: logger.warning(f'No deduplication implementation for {initializer_name} although it should be deduplicated. 
Please open an issue in Optimum repository.') return model # File: optimum-main/optimum/onnx/utils.py from pathlib import Path from typing import List, Tuple, Union import onnx from onnx.external_data_helper import ExternalDataInfo, _get_initializer_tensors def _get_onnx_external_constants(model: onnx.ModelProto) -> List[str]: external_constants = [] for node in model.graph.node: if node.op_type == 'Constant': for attribute in node.attribute: external_datas = attribute.t.external_data for external_data in external_datas: external_constants.append(external_data.value) return external_constants def _get_onnx_external_data_tensors(model: onnx.ModelProto) -> List[str]: model_tensors = _get_initializer_tensors(model) model_tensors_ext = [ExternalDataInfo(tensor).location for tensor in model_tensors if tensor.HasField('data_location') and tensor.data_location == onnx.TensorProto.EXTERNAL] return model_tensors_ext def _get_external_data_paths(src_paths: List[Path], dst_paths: List[Path]) -> Tuple[List[Path], List[str]]: model_paths = src_paths.copy() for (idx, model_path) in enumerate(model_paths): model = onnx.load(str(model_path), load_external_data=False) model_tensors = _get_initializer_tensors(model) model_tensors_ext = [ExternalDataInfo(tensor).location for tensor in model_tensors if tensor.HasField('data_location') and tensor.data_location == onnx.TensorProto.EXTERNAL] if len(set(model_tensors_ext)) == 1: src_paths.append(model_path.parent / model_tensors_ext[0]) dst_paths.append(dst_paths[idx].parent / model_tensors_ext[0]) else: src_paths.extend([model_path.parent / tensor_name for tensor_name in model_tensors_ext]) dst_paths.extend((dst_paths[idx].parent / tensor_name for tensor_name in model_tensors_ext)) return (src_paths, dst_paths) def check_model_uses_external_data(model: onnx.ModelProto) -> bool: model_tensors = _get_initializer_tensors(model) return any((tensor.HasField('data_location') and tensor.data_location == onnx.TensorProto.EXTERNAL for tensor in model_tensors)) def has_onnx_input(model: Union[onnx.ModelProto, Path, str], input_name: str) -> bool: if isinstance(model, (str, Path)): model = Path(model).as_posix() model = onnx.load(model, load_external_data=False) for input in model.graph.input: if input.name == input_name: return True return False # File: optimum-main/optimum/onnxruntime/__init__.py from typing import TYPE_CHECKING from transformers.utils import OptionalDependencyNotAvailable, _LazyModule from ..utils import is_diffusers_available _import_structure = {'configuration': ['CalibrationConfig', 'AutoCalibrationConfig', 'QuantizationMode', 'AutoQuantizationConfig', 'OptimizationConfig', 'AutoOptimizationConfig', 'ORTConfig', 'QuantizationConfig'], 'modeling_ort': ['ORTModel', 'ORTModelForAudioClassification', 'ORTModelForAudioFrameClassification', 'ORTModelForAudioXVector', 'ORTModelForCustomTasks', 'ORTModelForCTC', 'ORTModelForFeatureExtraction', 'ORTModelForImageClassification', 'ORTModelForMaskedLM', 'ORTModelForMultipleChoice', 'ORTModelForQuestionAnswering', 'ORTModelForSemanticSegmentation', 'ORTModelForSequenceClassification', 'ORTModelForTokenClassification'], 'modeling_seq2seq': ['ORTModelForSeq2SeqLM', 'ORTModelForSpeechSeq2Seq', 'ORTModelForVision2Seq', 'ORTModelForPix2Struct'], 'modeling_decoder': ['ORTModelForCausalLM'], 'optimization': ['ORTOptimizer'], 'quantization': ['ORTQuantizer'], 'trainer': ['ORTTrainer'], 'trainer_seq2seq': ['ORTSeq2SeqTrainer'], 'training_args': ['ORTTrainingArguments'], 'training_args_seq2seq': 
['ORTSeq2SeqTrainingArguments'], 'utils': ['ONNX_DECODER_NAME', 'ONNX_DECODER_MERGED_NAME', 'ONNX_DECODER_WITH_PAST_NAME', 'ONNX_ENCODER_NAME', 'ONNX_WEIGHTS_NAME', 'ORTQuantizableOperator']} try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: _import_structure['.utils.dummy_diffusers_objects'] = ['ORTStableDiffusionPipeline', 'ORTStableDiffusionImg2ImgPipeline', 'ORTStableDiffusionInpaintPipeline', 'ORTStableDiffusionXLPipeline', 'ORTStableDiffusionXLImg2ImgPipeline', 'ORTLatentConsistencyModelPipeline'] else: _import_structure['modeling_diffusion'] = ['ORTStableDiffusionPipeline', 'ORTStableDiffusionImg2ImgPipeline', 'ORTStableDiffusionInpaintPipeline', 'ORTStableDiffusionXLPipeline', 'ORTStableDiffusionXLImg2ImgPipeline', 'ORTLatentConsistencyModelPipeline'] if TYPE_CHECKING: from .configuration import ORTConfig, QuantizationConfig from .modeling_decoder import ORTModelForCausalLM from .modeling_ort import ORTModel, ORTModelForAudioClassification, ORTModelForAudioFrameClassification, ORTModelForAudioXVector, ORTModelForCTC, ORTModelForCustomTasks, ORTModelForFeatureExtraction, ORTModelForImageClassification, ORTModelForMaskedLM, ORTModelForMultipleChoice, ORTModelForQuestionAnswering, ORTModelForSemanticSegmentation, ORTModelForSequenceClassification, ORTModelForTokenClassification from .modeling_seq2seq import ORTModelForPix2Struct, ORTModelForSeq2SeqLM, ORTModelForSpeechSeq2Seq, ORTModelForVision2Seq from .optimization import ORTOptimizer from .quantization import ORTQuantizer from .trainer import ORTTrainer from .trainer_seq2seq import ORTSeq2SeqTrainer from .training_args import ORTTrainingArguments from .training_args_seq2seq import ORTSeq2SeqTrainingArguments from .utils import ONNX_DECODER_MERGED_NAME, ONNX_DECODER_NAME, ONNX_DECODER_WITH_PAST_NAME, ONNX_ENCODER_NAME, ONNX_WEIGHTS_NAME, ORTQuantizableOperator try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_diffusers_objects import ORTLatentConsistencyModelPipeline, ORTStableDiffusionImg2ImgPipeline, ORTStableDiffusionInpaintPipeline, ORTStableDiffusionPipeline, ORTStableDiffusionXLImg2ImgPipeline, ORTStableDiffusionXLPipeline else: from .modeling_diffusion import ORTLatentConsistencyModelPipeline, ORTStableDiffusionImg2ImgPipeline, ORTStableDiffusionInpaintPipeline, ORTStableDiffusionPipeline, ORTStableDiffusionXLImg2ImgPipeline, ORTStableDiffusionXLPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: optimum-main/optimum/onnxruntime/base.py """""" from abc import abstractmethod from typing import Dict, Optional, Set, Tuple, Union import numpy as np import torch from transformers.modeling_outputs import BaseModelOutput, Seq2SeqLMOutput from onnxruntime import InferenceSession from ..utils import NormalizedConfigManager from ..utils.logging import warn_once from .io_binding import TypeHelper from .modeling_ort import ORTModel from .utils import get_ordered_input_names, logging logger = logging.get_logger(__name__) class ORTModelPart: _prepare_onnx_inputs = ORTModel._prepare_onnx_inputs _prepare_onnx_outputs = ORTModel._prepare_onnx_outputs def __init__(self, session: InferenceSession, parent_model: 'ORTModel'): self.session = session self.parent_model = parent_model self.normalized_config = 
NormalizedConfigManager.get_normalized_config_class(self.parent_model.config.model_type)(self.parent_model.config) self.main_input_name = self.parent_model.main_input_name self.input_names = {input_key.name: idx for (idx, input_key) in enumerate(self.session.get_inputs())} self.output_names = {output_key.name: idx for (idx, output_key) in enumerate(self.session.get_outputs())} self.input_dtypes = {input_key.name: input_key.type for input_key in session.get_inputs()} self.output_dtypes = {output_key.name: output_key.type for output_key in session.get_outputs()} self._ordered_input_names = get_ordered_input_names(self.input_names.keys(), func=self.forward) @property def device(self): return self.parent_model.device @property def dtype(self): for dtype in self.input_dtypes.values(): torch_dtype = TypeHelper.ort_type_to_torch_type(dtype) if torch_dtype.is_floating_point: return torch_dtype for dtype in self.output_dtypes.values(): torch_dtype = TypeHelper.ort_type_to_torch_type(dtype) if torch_dtype.is_floating_point: return torch_dtype return None @abstractmethod def forward(self, *args, **kwargs): pass def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) class ORTEncoder(ORTModelPart): def forward(self, input_ids: torch.LongTensor, attention_mask: torch.LongTensor, **kwargs) -> BaseModelOutput: use_torch = isinstance(input_ids, torch.Tensor) self.parent_model.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.parent_model.use_io_binding: model_inputs = [input_ids] if 'attention_mask' in self.input_names: model_inputs.append(attention_mask) (io_binding, output_shapes, output_buffers) = self.parent_model._prepare_io_binding(self.session, *model_inputs, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.session.run_with_iobinding(io_binding) io_binding.synchronize_outputs() last_hidden_state = output_buffers['last_hidden_state'].view(output_shapes['last_hidden_state']) else: model_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.session.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) last_hidden_state = model_outputs['last_hidden_state'] return BaseModelOutput(last_hidden_state=last_hidden_state) class ORTDecoderForSeq2Seq(ORTModelPart): def __init__(self, session: InferenceSession, parent_model: 'ORTModel'): super().__init__(session, parent_model) self.key_value_input_names = [key for key in self.input_names if '.key' in key or '.value' in key] self.key_value_output_names = [key for key in self.output_names if '.key' in key or '.value' in key] if len(self.key_value_input_names) == 0: self.key_value_input_names = [key for key in self.input_names if 'key_values' in key] if len(self.key_value_output_names) == 0: self.key_value_output_names = [key for key in self.output_names if 'key_values' in key] if self.parent_model.use_cache is True and len(self.key_value_output_names) == 0: raise RuntimeError('Could not find the past key values in the provided model.') self.use_past_in_outputs = len(self.key_value_output_names) > 0 self.use_past_in_inputs = len(self.key_value_input_names) > 0 self.use_fp16 = False for inp in session.get_inputs(): if 'past_key_values' in inp.name and inp.type == 'tensor(float16)': self.use_fp16 = True break self.no_cross_attention_cache = getattr(self.parent_model, 'no_cross_attention_cache', False) if not self.parent_model.use_merged and 
self.use_past_in_inputs or self.no_cross_attention_cache: self.num_pkv = 2 else: self.num_pkv = 4 self.past_key_values_cross_attention_output_names = set() for output_name in self.output_names: if output_name.startswith('present') and 'encoder' in output_name: self.past_key_values_cross_attention_output_names.add(output_name) self.use_legacy_outputs = self.parent_model.use_merged is False and len(self.past_key_values_cross_attention_output_names) > 0 def compute_past_key_values_output_shapes(self, input_ids: torch.Tensor, encoder_hidden_states: torch.Tensor, use_cache_branch: Optional[bool], past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None) -> Dict[str, int]: batch_size = input_ids.size(0) num_attention_heads = self.normalized_config.num_attention_heads embed_size_per_head = self.normalized_config.hidden_size // num_attention_heads sequence_length = input_ids.size(1) encoder_sequence_length = encoder_hidden_states.size(1) if past_key_values is not None and use_cache_branch is not False: sequence_length += past_key_values[0].size(2) self_attn_shape = (batch_size, num_attention_heads, sequence_length, embed_size_per_head) if past_key_values is not None and use_cache_branch is True: cross_attn_shape = (0, num_attention_heads, 1, embed_size_per_head) else: cross_attn_shape = (batch_size, num_attention_heads, encoder_sequence_length, embed_size_per_head) past_key_values_shapes = {} for (idx, name) in enumerate(self.key_value_output_names): is_self_attn = idx % 4 < 2 past_key_values_shapes[name] = self_attn_shape if is_self_attn or self.num_pkv == 2 else cross_attn_shape return past_key_values_shapes def get_outputs_not_to_bind(self, use_merged_cache: bool) -> Set[str]: result = {output_name for output_name in self.output_names if not output_name.startswith('present') and output_name not in {'loss', 'logits'}} if use_merged_cache is True: result = result.union(self.past_key_values_cross_attention_output_names) return result def forward(self, input_ids: torch.LongTensor, encoder_hidden_states: torch.FloatTensor, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, labels: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, use_cache_branch: None=None) -> Seq2SeqLMOutput: use_torch = isinstance(input_ids, torch.Tensor) self.parent_model.raise_on_numpy_input_io_binding(use_torch) if past_key_values is not None: past_key_values = tuple((past_key_value for pkv_per_layer in past_key_values for past_key_value in pkv_per_layer)) use_merged_no_cache = past_key_values is None and self.parent_model.use_merged use_merged_cache = past_key_values is not None and self.parent_model.use_merged (use_cache_branch_tensor, past_key_values, cache_position) = self.prepare_inputs_for_merged(input_ids, past_key_values, cache_position, use_torch=use_torch) if self.parent_model.use_io_binding: known_output_shapes = self.compute_past_key_values_output_shapes(input_ids, encoder_hidden_states, use_cache_branch=use_cache_branch_tensor.item() if use_cache_branch_tensor is not None else None, past_key_values=past_key_values) outputs_to_not_bind = self.get_outputs_not_to_bind(use_merged_cache) model_inputs = [input_ids.contiguous()] if 'encoder_hidden_states' in self.input_names: model_inputs.append(encoder_hidden_states) if 'decoder_attention_mask' in self.input_names: model_inputs.append(decoder_attention_mask) if 'encoder_attention_mask' in self.input_names: 
model_inputs.append(encoder_attention_mask) if past_key_values is not None: model_inputs += past_key_values if 'labels' in self.input_names: model_inputs.append(labels) known_output_shapes.update({'loss': []}) if use_cache_branch_tensor is not None: model_inputs.append(use_cache_branch_tensor) if 'cache_position' in self.input_names: model_inputs.append(cache_position) (io_binding, output_shapes, output_buffers) = self.parent_model._prepare_io_binding(self.session, *model_inputs, known_output_shapes=known_output_shapes, ordered_input_names=self._ordered_input_names, outputs_to_not_bind=outputs_to_not_bind) for (name, shape) in output_shapes.items(): if name in self.key_value_output_names: output_shapes[name] = shape[:2] + (-1,) + shape[3:] io_binding.synchronize_inputs() self.session.run_with_iobinding(io_binding) io_binding.synchronize_outputs() out_past_key_values = () for name in self.key_value_output_names: if name in self.past_key_values_cross_attention_output_names and use_merged_cache: continue out_past_key_values += (output_buffers[name].view(output_shapes[name]),) logits = output_buffers['logits'].view(output_shapes['logits']) loss = None if 'loss' in self.output_names: loss = output_buffers['loss'].view(output_shapes['loss']) if not self.use_past_in_outputs: out_past_key_values = None elif not self.use_past_in_inputs or use_merged_no_cache: out_past_key_values = tuple((out_past_key_values[i:i + self.num_pkv] for i in range(0, len(out_past_key_values), self.num_pkv))) elif self.use_legacy_outputs is True: msg = 'For the decoder with past, using ONNX models outputting cross attention past key values is deprecated and the support will be removed in optimum 2.0. We recommend exporting again the model with optimum>=1.7.3.' warn_once(logger, msg=msg) out_past_key_values = tuple((out_past_key_values[i:i + self.num_pkv] for i in range(0, len(out_past_key_values), self.num_pkv))) elif self.num_pkv == 2: out_past_key_values = tuple((out_past_key_values[i:i + self.num_pkv] + past_key_values[2 * i + 2:2 * i + 2 + self.num_pkv] for i in range(0, len(out_past_key_values), self.num_pkv))) elif self.num_pkv == 4: out_past_key_values = tuple((out_past_key_values[i:i + 2] + past_key_values[2 * i + 2:2 * i + 4] for i in range(0, len(out_past_key_values), 2))) else: raise ValueError('Unsupported num_pkv') else: model_inputs = {'input_ids': input_ids, 'encoder_hidden_states': encoder_hidden_states, 'decoder_attention_mask': decoder_attention_mask, 'encoder_attention_mask': encoder_attention_mask, 'use_cache_branch': use_cache_branch_tensor, 'cache_position': cache_position, 'labels': labels} if past_key_values is not None: model_inputs.update(zip(self.key_value_input_names, past_key_values)) onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.session.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) out_past_key_values = tuple((model_outputs[output_name] for output_name in self.key_value_output_names)) loss = model_outputs.get('loss', None) logits = model_outputs['logits'] if not self.use_past_in_outputs: out_past_key_values = None elif not self.use_past_in_inputs or use_merged_no_cache or self.no_cross_attention_cache: out_past_key_values = tuple((out_past_key_values[i:i + self.num_pkv] for i in range(0, len(out_past_key_values), self.num_pkv))) elif self.use_legacy_outputs is True: msg = 'For the decoder with past, using ONNX models outputting cross attention past key values is deprecated and the support will be removed in 
optimum 2.0. We recommend exporting again the model with optimum>=1.7.3.' warn_once(logger, msg=msg) out_past_key_values = tuple((out_past_key_values[i:i + self.num_pkv] for i in range(0, len(out_past_key_values), self.num_pkv))) elif self.num_pkv == 2: out_past_key_values = tuple((out_past_key_values[i:i + self.num_pkv] + past_key_values[2 * i + 2:2 * i + 2 + self.num_pkv] for i in range(0, len(out_past_key_values), self.num_pkv))) elif self.num_pkv == 4: out_past_key_values = tuple((out_past_key_values[i:i + 2] + past_key_values[i + 2:i + 4] for i in range(0, len(out_past_key_values), self.num_pkv))) else: raise ValueError('Unsupported num_pkv') return Seq2SeqLMOutput(loss=loss, logits=logits, past_key_values=out_past_key_values) def prepare_inputs_for_merged(self, input_ids: Optional[Union[torch.LongTensor, np.ndarray]], past_key_values: Optional[Tuple[Union[torch.FloatTensor, np.ndarray]]], cache_position: Optional[Union[torch.Tensor, np.ndarray]], use_torch: bool): constructor = torch if use_torch is True else np if self.parent_model.use_merged: use_cache_branch_tensor = constructor.full((1,), past_key_values is not None) if use_torch and use_cache_branch_tensor is not None: use_cache_branch_tensor = use_cache_branch_tensor.to(self.device) else: use_cache_branch_tensor = None if self.parent_model.use_merged and past_key_values is None: batch_size = input_ids.shape[0] num_attention_heads = self.normalized_config.num_attention_heads embed_size_per_head = self.normalized_config.hidden_size // num_attention_heads dtype = constructor.float16 if self.use_fp16 else constructor.float32 shape = (batch_size, num_attention_heads, 1, embed_size_per_head) key_or_value = constructor.zeros(shape, dtype=dtype) if use_torch is True: key_or_value = key_or_value.to(self.device) past_key_values = tuple((key_or_value for _ in range(len(self.key_value_input_names)))) if self.parent_model.use_merged and cache_position is None: cache_position = constructor.zeros((1,), dtype=constructor.int64) if use_torch is True: cache_position = cache_position.to(self.device) return (use_cache_branch_tensor, past_key_values, cache_position) class ORTDecoder(ORTDecoderForSeq2Seq): def __init__(self, *args, **kwargs): logger.warning('The class `ORTDecoder` is deprecated and will be removed in optimum v1.15.0, please use `ORTDecoderForSeq2Seq` instead.') super().__init__(*args, **kwargs) # File: optimum-main/optimum/onnxruntime/configuration.py """""" import os import warnings from dataclasses import asdict, dataclass, field from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union from datasets import Dataset from packaging.version import Version, parse from onnxruntime import __version__ as ort_version from onnxruntime.quantization import CalibraterBase, CalibrationMethod, QuantFormat, QuantizationMode, QuantType from onnxruntime.quantization.calibrate import create_calibrator from onnxruntime.quantization.registry import IntegerOpsRegistry, QDQRegistry, QLinearOpsRegistry from onnxruntime.transformers.fusion_options import FusionOptions from ..configuration_utils import BaseConfig from ..utils import logging logger = logging.get_logger(__name__) ORT_DEFAULT_CHANNEL_FOR_OPERATORS = {'MatMul': 1} ORT_DEFAULT_OPS_DYNAMIC_QUANTIZATION = list(IntegerOpsRegistry.keys()) ORT_DEFAULT_OPS_STATIC_QUANTIZATION_QDQ = list(QDQRegistry.keys()) ORT_DEFAULT_OPS_STATIC_QUANTIZATION_QOPS = list(QLinearOpsRegistry.keys()) @dataclass class CalibrationConfig: dataset_name: str 
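# Usage sketch (illustrative, with an arbitrary placeholder dataset): the CalibrationConfig
# declared here is normally produced through the AutoCalibrationConfig helpers defined
# later in this file rather than filled in by hand.
from datasets import load_dataset

from optimum.onnxruntime.configuration import AutoCalibrationConfig

# Any datasets.Dataset works; "glue"/"sst2" and 100 samples are just example choices.
calibration_samples = load_dataset("glue", "sst2", split="train[:100]")
calibration_config = AutoCalibrationConfig.minmax(
    calibration_samples,
    moving_average=False,
    averaging_constant=0.01,
)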
dataset_config_name: str dataset_split: str dataset_num_samples: int method: CalibrationMethod num_bins: Optional[int] = None num_quantized_bins: Optional[int] = None percentile: Optional[float] = None moving_average: Optional[bool] = None averaging_constant: Optional[float] = None def create_calibrator(self, onnx_model_path: Union[str, os.PathLike, Path], operators_to_quantize: Optional[List[str]], use_external_data_format: bool=False, force_symmetric_range: bool=False, augmented_model_name: str='augmented_model.onnx') -> CalibraterBase: kwargs = {'model': onnx_model_path, 'op_types_to_calibrate': operators_to_quantize or [], 'calibrate_method': self.method, 'augmented_model_path': augmented_model_name} if parse(ort_version) > Version('1.10.0'): kwargs['use_external_data_format'] = use_external_data_format kwargs['extra_options'] = {'symmetric': force_symmetric_range, 'num_bins': self.num_bins, 'num_quantized_bins': self.num_quantized_bins, 'percentile': self.percentile, 'moving_average': self.moving_average, 'averaging_constant': self.averaging_constant} return create_calibrator(**kwargs) class AutoCalibrationConfig: @staticmethod def minmax(dataset: Dataset, moving_average: bool=False, averaging_constant: float=0.01) -> CalibrationConfig: if moving_average and parse(ort_version) < Version('1.11.0'): raise NotImplementedError('MinMax calibration using the moving average method is only implemented for onnxruntime >= 1.11.0') if moving_average and (not 0 <= averaging_constant <= 1): raise ValueError(f'Invalid averaging constant value ({averaging_constant}) should be within [0, 1]') return CalibrationConfig(dataset_name=dataset.info.builder_name, dataset_config_name=dataset.info.config_name, dataset_split=str(dataset.split), dataset_num_samples=dataset.num_rows, method=CalibrationMethod.MinMax, moving_average=moving_average, averaging_constant=averaging_constant) @staticmethod def entropy(dataset: Dataset, num_bins: int=128, num_quantized_bins: int=128) -> CalibrationConfig: if parse(ort_version) < Version('1.11.0'): raise NotImplementedError('Entropy calibration method is only implemented for onnxruntime >= 1.11.0') if num_bins <= 0: raise ValueError(f'Invalid value num_bins ({num_bins}) should be >= 1') if num_quantized_bins <= 0: raise ValueError(f'Invalid value num_quantized_bins ({num_quantized_bins}) should be >= 1') return CalibrationConfig(dataset_name=dataset.info.builder_name, dataset_config_name=dataset.info.config_name, dataset_split=str(dataset.split), dataset_num_samples=dataset.num_rows, method=CalibrationMethod.Entropy, num_bins=num_bins, num_quantized_bins=num_quantized_bins) @staticmethod def percentiles(dataset: Dataset, num_bins: int=2048, percentile: float=99.999) -> CalibrationConfig: if parse(ort_version) < Version('1.11.0'): raise NotImplementedError('Percentile calibration method is only implemented for onnxruntime >= 1.11.0') if num_bins <= 0: raise ValueError(f'Invalid value num_bins ({num_bins}) should be >= 1') if not 0 <= percentile <= 100: raise ValueError(f'Invalid value percentile ({percentile}) should be within [0, 100]') return CalibrationConfig(dataset_name=dataset.info.builder_name, dataset_config_name=dataset.info.config_name, dataset_split=str(dataset.split), dataset_num_samples=dataset.num_rows, method=CalibrationMethod.Percentile, num_bins=num_bins, percentile=percentile) @dataclass class QuantizationConfig: is_static: bool format: QuantFormat mode: QuantizationMode = QuantizationMode.QLinearOps activations_dtype: QuantType = QuantType.QUInt8 
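# Usage sketch (illustrative): building a dynamic QuantizationConfig by hand from the
# fields of this dataclass (the remaining fields are declared just below). The values
# follow the validation helpers further down in this file: dynamic quantization requires
# QuantizationMode.IntegerOps and QUInt8 activations. In practice the AutoQuantizationConfig
# presets defined later are the usual entry point.
from onnxruntime.quantization import QuantFormat, QuantizationMode, QuantType

from optimum.onnxruntime.configuration import QuantizationConfig

dynamic_qconfig = QuantizationConfig(
    is_static=False,
    format=QuantFormat.QOperator,
    mode=QuantizationMode.IntegerOps,
    activations_dtype=QuantType.QUInt8,
    weights_dtype=QuantType.QInt8,
    per_channel=True,  # operators_to_quantize is filled with the dynamic defaults in __post_init__
)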
activations_symmetric: bool = False weights_dtype: QuantType = QuantType.QInt8 weights_symmetric: bool = True per_channel: bool = False reduce_range: bool = False nodes_to_quantize: List[str] = field(default_factory=list) nodes_to_exclude: List[str] = field(default_factory=list) operators_to_quantize: List[str] = field(default_factory=list) qdq_add_pair_to_weight: bool = False qdq_dedicated_pair: bool = False qdq_op_type_per_channel_support_to_axis: Dict[str, int] = field(default_factory=lambda : ORT_DEFAULT_CHANNEL_FOR_OPERATORS) def __post_init__(self): ensure_valid_mode_or_raise(self.is_static, self.mode) ensure_valid_data_type_or_raise(self.is_static, self.activations_dtype, self.weights_dtype) if len(self.operators_to_quantize) == 0: (_, _, operators_to_quantize) = default_quantization_parameters(self.is_static, self.format, self.mode, self.operators_to_quantize) self.operators_to_quantize = operators_to_quantize if isinstance(self.format, str): self.format = QuantFormat[self.format] if isinstance(self.mode, str): self.mode = QuantizationMode[self.mode] if isinstance(self.activations_dtype, str): self.activations_dtype = QuantType[self.activations_dtype] if isinstance(self.weights_dtype, str): self.weights_dtype = QuantType[self.weights_dtype] @staticmethod def quantization_type_str(activations_dtype: QuantType, weights_dtype: QuantType) -> str: return f"{('s8' if activations_dtype == QuantType.QInt8 else 'u8')}/{('s8' if weights_dtype == QuantType.QInt8 else 'u8')}" @property def use_symmetric_calibration(self) -> bool: return self.activations_symmetric and self.weights_symmetric def __str__(self): return f'{self.format} (mode: {self.mode}, schema: {QuantizationConfig.quantization_type_str(self.activations_dtype, self.weights_dtype)}, channel-wise: {self.per_channel})' def ensure_valid_mode_or_raise(use_static_quantization: bool, mode: QuantizationMode): if not use_static_quantization and mode == QuantizationMode.QLinearOps: raise ValueError('Invalid combination of use_static_quantization = False and mode = QuantizationMode.QLinearOps. OnnxRuntime dynamic quantization requires mode = QuantizationMode.IntegerOps') def ensure_valid_data_type_or_raise(use_static_quantization: bool, activations_dtype: QuantType, weights_dtype: QuantType): if not use_static_quantization and activations_dtype == QuantType.QInt8: raise ValueError('Invalid combination of use_static_quantization = False and activations_dtype = QuantType.QInt8. 
OnnxRuntime dynamic quantization requires activations_dtype = QuantType.QUInt8') if use_static_quantization and activations_dtype == QuantType.QInt8 and (weights_dtype == QuantType.QUInt8): raise ValueError('Invalid combination of use_static_quantization = True, activations_dtype = QuantType.QInt8 and weights_dtype = QuantType.QUInt8.OnnxRuntime static quantization does not support activations_dtype = QuantType.QInt8 with weights_dtype = QuantType.QUInt8.') def default_quantization_parameters(is_static: bool, format: Optional[QuantFormat]=None, mode: Optional[QuantizationMode]=None, operators_to_quantize: Optional[List[str]]=None) -> Tuple[QuantFormat, QuantizationMode, List[str]]: if format is None: format = QuantFormat.QDQ if is_static else QuantFormat.QOperator if mode is None: mode = QuantizationMode.QLinearOps if is_static else QuantizationMode.IntegerOps if operators_to_quantize is None or len(operators_to_quantize) == 0: if is_static and format == QuantFormat.QDQ: operators_to_quantize = ORT_DEFAULT_OPS_STATIC_QUANTIZATION_QDQ elif is_static and mode == QuantizationMode.QLinearOps: operators_to_quantize = ORT_DEFAULT_OPS_STATIC_QUANTIZATION_QOPS elif not is_static and mode == QuantizationMode.IntegerOps: operators_to_quantize = ORT_DEFAULT_OPS_DYNAMIC_QUANTIZATION return (format, mode, operators_to_quantize) class AutoQuantizationConfig: @staticmethod def arm64(is_static: bool, use_symmetric_activations: bool=False, use_symmetric_weights: bool=True, per_channel: bool=True, nodes_to_quantize: Optional[List[str]]=None, nodes_to_exclude: Optional[List[str]]=None, operators_to_quantize: Optional[List[str]]=None): (format, mode, operators_to_quantize) = default_quantization_parameters(is_static, operators_to_quantize=operators_to_quantize) return QuantizationConfig(is_static=is_static, format=format, mode=mode, activations_dtype=QuantType.QUInt8, activations_symmetric=use_symmetric_activations, weights_dtype=QuantType.QInt8, weights_symmetric=use_symmetric_weights, per_channel=per_channel, reduce_range=False, nodes_to_quantize=nodes_to_quantize or [], nodes_to_exclude=nodes_to_exclude or [], operators_to_quantize=operators_to_quantize) @staticmethod def avx2(is_static: bool, use_symmetric_activations: bool=False, use_symmetric_weights: bool=True, per_channel: bool=True, reduce_range: bool=False, nodes_to_quantize: Optional[List[str]]=None, nodes_to_exclude: Optional[List[str]]=None, operators_to_quantize: Optional[List[str]]=None) -> QuantizationConfig: (format, mode, operators_to_quantize) = default_quantization_parameters(is_static, operators_to_quantize=operators_to_quantize) return QuantizationConfig(is_static=is_static, format=format, mode=mode, activations_dtype=QuantType.QUInt8, activations_symmetric=use_symmetric_activations, weights_dtype=QuantType.QUInt8, weights_symmetric=use_symmetric_weights, per_channel=per_channel, reduce_range=reduce_range, nodes_to_quantize=nodes_to_quantize or [], nodes_to_exclude=nodes_to_exclude or [], operators_to_quantize=operators_to_quantize) @staticmethod def avx512(is_static: bool, use_symmetric_activations: bool=False, use_symmetric_weights: bool=True, per_channel: bool=True, reduce_range: bool=False, nodes_to_quantize: Optional[List[str]]=None, nodes_to_exclude: Optional[List[str]]=None, operators_to_quantize: Optional[List[str]]=None) -> QuantizationConfig: (format, mode, operators_to_quantize) = default_quantization_parameters(is_static, operators_to_quantize=operators_to_quantize) return QuantizationConfig(is_static=is_static, 
format=format, mode=mode, activations_dtype=QuantType.QUInt8, activations_symmetric=use_symmetric_activations, weights_dtype=QuantType.QInt8, weights_symmetric=use_symmetric_weights, per_channel=per_channel, reduce_range=reduce_range, nodes_to_quantize=nodes_to_quantize or [], nodes_to_exclude=nodes_to_exclude or [], operators_to_quantize=operators_to_quantize) @staticmethod def avx512_vnni(is_static: bool, use_symmetric_activations: bool=False, use_symmetric_weights: bool=True, per_channel: bool=True, nodes_to_quantize: Optional[List[str]]=None, nodes_to_exclude: Optional[List[str]]=None, operators_to_quantize: Optional[List[str]]=None) -> QuantizationConfig: (format, mode, operators_to_quantize) = default_quantization_parameters(is_static, operators_to_quantize=operators_to_quantize) return QuantizationConfig(is_static=is_static, format=format, mode=mode, activations_dtype=QuantType.QUInt8, activations_symmetric=use_symmetric_activations, weights_dtype=QuantType.QInt8, weights_symmetric=use_symmetric_weights, per_channel=per_channel, reduce_range=False, nodes_to_quantize=nodes_to_quantize or [], nodes_to_exclude=nodes_to_exclude or [], operators_to_quantize=operators_to_quantize) @staticmethod def tensorrt(per_channel: bool=True, nodes_to_quantize: Optional[List[str]]=None, nodes_to_exclude: Optional[List[str]]=None, operators_to_quantize: Optional[List[str]]=None) -> QuantizationConfig: (format, mode, operators_to_quantize) = default_quantization_parameters(is_static=True, operators_to_quantize=operators_to_quantize) return QuantizationConfig(is_static=True, format=format, mode=mode, activations_dtype=QuantType.QInt8, activations_symmetric=True, weights_dtype=QuantType.QInt8, weights_symmetric=True, per_channel=per_channel, reduce_range=False, nodes_to_quantize=nodes_to_quantize or [], nodes_to_exclude=nodes_to_exclude or [], operators_to_quantize=operators_to_quantize, qdq_add_pair_to_weight=True, qdq_dedicated_pair=True) @dataclass class OptimizationConfig: optimization_level: int = 1 optimize_for_gpu: bool = False fp16: bool = False optimize_with_onnxruntime_only: Optional[bool] = None enable_transformers_specific_optimizations: bool = True disable_gelu: Optional[bool] = None disable_gelu_fusion: bool = False disable_layer_norm: Optional[bool] = None disable_layer_norm_fusion: bool = False disable_attention: Optional[bool] = None disable_attention_fusion: bool = False disable_skip_layer_norm: Optional[bool] = None disable_skip_layer_norm_fusion: bool = False disable_bias_skip_layer_norm: Optional[bool] = None disable_bias_skip_layer_norm_fusion: bool = False disable_bias_gelu: Optional[bool] = None disable_bias_gelu_fusion: bool = False disable_embed_layer_norm: Optional[bool] = None disable_embed_layer_norm_fusion: bool = True enable_gelu_approximation: bool = False use_mask_index: bool = False no_attention_mask: bool = False disable_embed_layer_norm: bool = True disable_shape_inference: bool = False use_multi_head_attention: bool = False enable_gemm_fast_gelu_fusion: bool = False use_raw_attention_mask: bool = False disable_group_norm_fusion: bool = True disable_packed_kv: bool = True disable_rotary_embeddings: bool = False def __post_init__(self): def deprecate_renamed_attribute(old_name, new_name, mapping_func=None): if getattr(self, old_name, None) is not None: if mapping_func is None: def identity(x): return x mapping_func = identity setattr(self, new_name, mapping_func(getattr(self, old_name))) warnings.warn(f'{old_name} will be deprecated soon, use {new_name} instead, 
{new_name} is set to {getattr(self, new_name)}.', FutureWarning) deprecate_renamed_attribute('optimize_with_onnxruntime_only', 'enable_transformers_specific_optimizations', mapping_func=lambda x: not x) deprecate_renamed_attribute('disable_gelu', 'disable_bias_gelu_fusion') deprecate_renamed_attribute('disable_layer_norm', 'disable_layer_norm_fusion') deprecate_renamed_attribute('disable_attention', 'disable_attention_fusion') deprecate_renamed_attribute('disable_skip_layer_norm', 'disable_skip_layer_norm_fusion') deprecate_renamed_attribute('disable_bias_skip_layer_norm', 'disable_bias_skip_layer_norm_fusion') deprecate_renamed_attribute('disable_bias_gelu', 'disable_bias_gelu_fusion') deprecate_renamed_attribute('disable_embed_layer_norm', 'disable_embed_layer_norm_fusion') def create_fusion_options(self, model_type: str) -> FusionOptions: class Box: pass args = Box() args.model_type = model_type attribute_map = {'disable_gelu_fusion': 'disable_gelu', 'disable_layer_norm_fusion': 'disable_layer_norm', 'disable_attention_fusion': 'disable_attention', 'disable_skip_layer_norm_fusion': 'disable_skip_layer_norm', 'disable_bias_skip_layer_norm_fusion': 'disable_bias_skip_layer_norm', 'disable_bias_gelu_fusion': 'disable_bias_gelu', 'disable_embed_layer_norm_fusion': 'disable_embed_layer_norm', 'disable_group_norm_fusion': 'disable_group_norm', 'disable_packed_kv': 'disable_packed_kv', 'use_raw_attention_mask': 'use_raw_attention_mask', 'enable_gemm_fast_gelu_fusion': 'enable_gemm_fast_gelu', 'use_multi_head_attention': 'use_multi_head_attention', 'disable_rotary_embeddings': 'disable_rotary_embeddings'} for (attr_name, fusion_attr_name) in attribute_map.items(): setattr(args, fusion_attr_name, getattr(self, attr_name)) for (attr, value) in self.__dict__.items(): if hasattr(args, attr): continue setattr(args, attr, value) return FusionOptions.parse(args) class AutoOptimizationConfig: _LEVELS = {'O1': {'optimization_level': 1, 'enable_transformers_specific_optimizations': False}, 'O2': {'optimization_level': 2, 'enable_transformers_specific_optimizations': True}, 'O3': {'optimization_level': 2, 'enable_transformers_specific_optimizations': True, 'enable_gelu_approximation': True}, 'O4': {'optimization_level': 2, 'enable_transformers_specific_optimizations': True, 'enable_gelu_approximation': True, 'fp16': True}} @classmethod def with_optimization_level(cls, optimization_level: str, for_gpu: bool=False, **kwargs) -> OptimizationConfig: if optimization_level not in cls._LEVELS: raise ValueError(f"optimization_level must be in {', '.join(cls._LEVELS.keys())}, got {optimization_level}") if optimization_level == 'O4': if for_gpu is False: logger.warning('Overriding for_gpu=False to for_gpu=True as half precision is available only on GPU.') for_gpu = True return OptimizationConfig(optimize_for_gpu=for_gpu, **cls._LEVELS[optimization_level], **kwargs) @classmethod def O1(cls, for_gpu: bool=False, **kwargs) -> OptimizationConfig: return cls.with_optimization_level('O1', for_gpu=for_gpu, **kwargs) @classmethod def O2(cls, for_gpu: bool=False, **kwargs) -> OptimizationConfig: return cls.with_optimization_level('O2', for_gpu=for_gpu, **kwargs) @classmethod def O3(cls, for_gpu: bool=False, **kwargs) -> OptimizationConfig: return cls.with_optimization_level('O3', for_gpu=for_gpu, **kwargs) @classmethod def O4(cls, for_gpu: bool=True, **kwargs) -> OptimizationConfig: return cls.with_optimization_level('O4', for_gpu=for_gpu, **kwargs) class ORTConfig(BaseConfig): CONFIG_NAME = 'ort_config.json'
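# Usage sketch (illustrative): bundling an optimization preset and a quantization preset
# into an ORTConfig, then serializing it with to_dict(). The opset value is an arbitrary
# example, not a requirement of this class.
from optimum.onnxruntime.configuration import AutoOptimizationConfig, AutoQuantizationConfig, ORTConfig

ort_config = ORTConfig(
    opset=14,
    optimization=AutoOptimizationConfig.O2(for_gpu=False),
    quantization=AutoQuantizationConfig.avx512_vnni(is_static=False),
)
print(ort_config.to_dict())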
FULL_CONFIGURATION_FILE = 'ort_config.json' def __init__(self, opset: Optional[int]=None, use_external_data_format: bool=False, one_external_file: bool=True, optimization: Optional[OptimizationConfig]=None, quantization: Optional[QuantizationConfig]=None, **kwargs): super().__init__() self.opset = opset self.use_external_data_format = use_external_data_format self.one_external_file = one_external_file if isinstance(optimization, dict) and optimization: self.optimization = OptimizationConfig(**optimization) elif isinstance(optimization, OptimizationConfig): self.optimization = optimization elif not optimization: self.optimization = None else: raise ValueError(f'Optional argument `optimization` must be a dictionary or an instance of OptimizationConfig, got {type(optimization)}') if isinstance(quantization, dict) and quantization: self.quantization = QuantizationConfig(**quantization) elif isinstance(quantization, QuantizationConfig): self.quantization = quantization elif not quantization: self.quantization = None else: raise ValueError(f'Optional argument `quantization` must be a dictionary or an instance of QuantizationConfig, got {type(quantization)}') self.optimum_version = kwargs.pop('optimum_version', None) @staticmethod def dataclass_to_dict(config) -> dict: new_config = {} if config is None: return new_config if isinstance(config, dict): return config for (k, v) in asdict(config).items(): if isinstance(v, Enum): v = v.name elif isinstance(v, list): v = [elem.name if isinstance(elem, Enum) else elem for elem in v] new_config[k] = v return new_config def to_dict(self) -> Dict[str, Any]: dict_config = {'opset': self.opset, 'use_external_data_format': self.use_external_data_format, 'one_external_file': self.one_external_file, 'optimization': self.dataclass_to_dict(self.optimization), 'quantization': self.dataclass_to_dict(self.quantization)} if self.optimum_version: dict_config['optimum_version'] = self.optimum_version return dict_config # File: optimum-main/optimum/onnxruntime/io_binding/io_binding_helper.py import logging import traceback from typing import TYPE_CHECKING import numpy as np import torch import onnxruntime as ort from onnxruntime.capi.onnxruntime_inference_collection import OrtValue from onnxruntime.transformers.io_binding_helper import TypeHelper as ORTTypeHelper from ..utils import is_cupy_available, is_onnxruntime_training_available if TYPE_CHECKING: from ..modeling_ort import ORTModel if is_cupy_available(): import cupy as cp class TypeHelper(ORTTypeHelper): @staticmethod def ort_type_to_numpy_type(ort_type: str): ort_type_to_numpy_type_map = {'tensor(int64)': np.int64, 'tensor(int32)': np.int32, 'tensor(int8)': np.int8, 'tensor(float)': np.float32, 'tensor(float16)': np.float16, 'tensor(bool)': bool} if ort_type in ort_type_to_numpy_type_map: return ort_type_to_numpy_type_map[ort_type] else: raise ValueError(f'{ort_type} is not supported. Here is a list of supported data type: {ort_type_to_numpy_type_map.keys()}') @staticmethod def ort_type_to_torch_type(ort_type: str): ort_type_to_torch_type_map = {'tensor(int64)': torch.int64, 'tensor(int32)': torch.int32, 'tensor(int8)': torch.int8, 'tensor(float)': torch.float32, 'tensor(float16)': torch.float16, 'tensor(bool)': torch.bool} if ort_type in ort_type_to_torch_type_map: return ort_type_to_torch_type_map[ort_type] else: raise ValueError(f'{ort_type} is not supported. 
Here is a list of supported data type: {ort_type_to_torch_type_map.keys()}') class IOBindingHelper: def __init__(self, model: ort.InferenceSession, device, **kwargs): self.model = model self.device = device self.model_inputs = {output_key.name: idx for (idx, output_key) in enumerate(model.get_inputs())} self.model_outputs = {output_key.name: idx for (idx, output_key) in enumerate(model.get_outputs())} self.model_input_names = list(self.model_inputs.keys()) self.model_output_names = list(self.model_outputs.keys()) @staticmethod def to_pytorch(ort_value: OrtValue) -> torch.Tensor: if is_onnxruntime_training_available(): return IOBindingHelper.to_pytorch_via_dlpack(ort_value) else: try: return IOBindingHelper.to_pytorch_via_cupy(ort_value) except Exception: logging.error(traceback.format_exc()) logging.info('Unable to access output memory in CUDA, will offload to CPU') return IOBindingHelper.to_pytorch_via_numpy(ort_value) @staticmethod def to_pytorch_via_numpy(ort_value: OrtValue) -> torch.Tensor: ort_device = ort_value.device_name().lower() return torch.tensor(ort_value.numpy()).to(ort_device) @staticmethod def to_pytorch_via_cupy(ort_value: OrtValue) -> torch.Tensor: ort_device = ort_value.device_name().lower() if ort_device != 'cuda': raise RuntimeError(f'Exchange tensors to PyTorch via CuPy only when device is CUDA, got: {ort_device}') ort_type = ort_value.data_type() numpy_type = TypeHelper.ort_type_to_numpy_type(ort_type) memory = cp.cuda.UnownedMemory(ort_value.data_ptr(), 0, None) memory_ptr = cp.cuda.MemoryPointer(memory, 0) cp_array = cp.ndarray(shape=ort_value.shape(), memptr=memory_ptr, dtype=numpy_type) torch_tensor = torch.from_dlpack(cp_array.toDlpack()) if 'bool' in ort_type: torch_tensor = torch_tensor.to(torch.bool) torch_tensor = torch_tensor.clone() return torch_tensor @staticmethod def to_pytorch_via_dlpack(ort_value: OrtValue) -> torch.Tensor: from torch._C import _from_dlpack torch_tensor = _from_dlpack(ort_value.to_dlpack()) return torch_tensor @staticmethod def get_device_index(device): if isinstance(device, str): device = torch.device(device) elif isinstance(device, int): return device return 0 if device.index is None else device.index @staticmethod def prepare_io_binding(ort_model: 'ORTModel', **inputs) -> ort.IOBinding: if not all((input_name in inputs.keys() for input_name in ort_model.input_names)): raise ValueError(f'The ONNX model takes {ort_model.input_names.keys()} as inputs, but only {inputs.keys()} are given.') name_to_np_type = TypeHelper.get_io_numpy_type_map(ort_model.model) io_binding = ort_model.model.io_binding() for input_name in ort_model.input_names: onnx_input = inputs.pop(input_name) onnx_input = onnx_input.contiguous() io_binding.bind_input(input_name, onnx_input.device.type, ort_model.device.index, name_to_np_type[input_name], list(onnx_input.size()), onnx_input.data_ptr()) for name in ort_model.output_names: io_binding.bind_output(name, ort_model.device.type, device_id=ort_model.device.index) return io_binding # File: optimum-main/optimum/onnxruntime/model.py import logging import os from typing import Callable, Dict, List, Optional, Union import numpy as np from datasets import Dataset from transformers import EvalPrediction from transformers.trainer_pt_utils import nested_concat from transformers.trainer_utils import EvalLoopOutput from onnxruntime import InferenceSession logger = logging.getLogger(__name__) class ORTModel: def __init__(self, model_path: Union[str, os.PathLike], execution_provider: Optional[str]='CPUExecutionProvider', 
compute_metrics: Optional[Callable[[EvalPrediction], Dict]]=None, label_names: Optional[List[str]]=None): logger.warning('The class `optimum.onnxruntime.model.ORTModel` is deprecated and will be removed in the next release.') self.compute_metrics = compute_metrics self.label_names = ['labels'] if label_names is None else label_names self.session = InferenceSession(str(model_path), providers=[execution_provider]) self.onnx_input_names = {input_key.name: idx for (idx, input_key) in enumerate(self.session.get_inputs())} def evaluation_loop(self, dataset: Dataset): logger.info('***** Running evaluation *****') all_preds = None all_labels = None for (step, inputs) in enumerate(dataset): has_labels = all((inputs.get(k) is not None for k in self.label_names)) if has_labels: labels = tuple((np.array([inputs.get(name)]) for name in self.label_names)) if len(labels) == 1: labels = labels[0] else: labels = None onnx_inputs = {key: np.array([inputs[key]]) for key in self.onnx_input_names if key in inputs} preds = self.session.run(None, onnx_inputs) if len(preds) == 1: preds = preds[0] all_preds = preds if all_preds is None else nested_concat(all_preds, preds, padding_index=-100) all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) if self.compute_metrics is not None and all_preds is not None and (all_labels is not None): metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels)) else: metrics = {} return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=len(dataset)) # File: optimum-main/optimum/onnxruntime/modeling_decoder.py """""" import logging import warnings from pathlib import Path from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import numpy as np import onnx import torch from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from onnx.tools import update_model_dims from transformers import AutoModelForCausalLM, GenerationConfig from transformers.file_utils import add_end_docstrings, add_start_docstrings_to_model_forward from transformers.modeling_outputs import CausalLMOutputWithPast import onnxruntime from ..exporters.onnx import MODEL_TYPES_REQUIRING_POSITION_IDS, main_export from ..onnx.utils import check_model_uses_external_data from ..utils import NormalizedConfigManager, check_if_transformers_greater from ..utils.modeling_utils import MODEL_TO_PATCH_FOR_PAST from ..utils.save_utils import maybe_save_preprocessors from .constants import DECODER_MERGED_ONNX_FILE_PATTERN, DECODER_ONNX_FILE_PATTERN, DECODER_WITH_PAST_ONNX_FILE_PATTERN from .modeling_ort import ONNX_MODEL_END_DOCSTRING, ORTModel from .models.bloom import bloom_convert_to_bloom_cache, bloom_convert_to_standard_cache from .utils import ONNX_DECODER_NAME, ONNX_DECODER_WITH_PAST_NAME, ONNX_WEIGHTS_NAME if TYPE_CHECKING: from transformers import PretrainedConfig if check_if_transformers_greater('4.25.0'): from transformers.generation import GenerationMixin else: from transformers.generation_utils import GenerationMixin logger = logging.getLogger(__name__) DECODER_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor`):\n Indices of decoder input sequence tokens in the vocabulary of shape `(batch_size, sequence_length)`.\n attention_mask (`torch.LongTensor`, *optional*):\n Mask to avoid performing attention on padding token indices, of shape\n `(batch_size, sequence_length)`. 
Mask values selected in `[0, 1]`.\n past_key_values (`tuple(tuple(torch.FloatTensor), *optional*, defaults to `None`)`\n Contains the precomputed key and value hidden states of the attention blocks used to speed up decoding.\n The tuple is of length `config.n_layers` with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`.\n' CAUSALLM_ONNX_MODEL_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor`):\n Indices of decoder input sequence tokens in the vocabulary of shape `(batch_size, sequence_length)`.\n attention_mask (`torch.LongTensor`):\n Mask to avoid performing attention on padding token indices, of shape\n `(batch_size, sequence_length)`. Mask values selected in `[0, 1]`.\n past_key_values (`tuple(tuple(torch.FloatTensor), *optional*, defaults to `None`)`\n Contains the precomputed key and value hidden states of the attention blocks used to speed up decoding.\n The tuple is of length `config.n_layers` with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`.\n' _TOKENIZER_FOR_DOC = 'AutoTokenizer' TEXT_GENERATION_EXAMPLE = '\n Example of text generation:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = tokenizer("My name is Arthur and I live in", return_tensors="pt")\n\n >>> gen_tokens = model.generate(**inputs,do_sample=True,temperature=0.9, min_length=20,max_length=20)\n >>> tokenizer.batch_decode(gen_tokens) # doctest: +IGNORE_RESULT\n ```\n\n Example using `transformers.pipelines`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_gen = pipeline("text-generation", model=model, tokenizer=tokenizer)\n\n >>> text = "My name is Arthur and I live in"\n >>> gen = onnx_gen(text)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForCausalLM(ORTModel, GenerationMixin): auto_model_class = AutoModelForCausalLM main_input_name = 'input_ids' _supports_cache_class = False def __init__(self, model: onnxruntime.InferenceSession, config: 'PretrainedConfig', use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, preprocessors: Optional[List]=None, generation_config: Optional[GenerationConfig]=None, use_cache: Optional[bool]=None, **kwargs): if use_io_binding is None: use_io_binding = model.get_providers()[0] in ['CPUExecutionProvider', 'CUDAExecutionProvider'] super().__init__(model, config, use_io_binding, model_save_dir, preprocessors, **kwargs) self.num_pkv = 2 self.normalized_config = NormalizedConfigManager.get_normalized_config_class(config.model_type)(config) self.key_value_input_names = [key for key in self.input_names if '.key' in key or '.value' in key] self.key_value_output_names = [key for key in self.output_names if '.key' in key or '.value' in key] self.use_cache = len(self.key_value_input_names) > 0 if generation_config is None: generation_config = GenerationConfig.from_model_config(config) self.generation_config = generation_config self.onnx_paths = [self.model_path] self.use_merged = 'use_cache_branch' in self.input_names self.model_type = self.config.model_type 
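# Usage sketch (illustrative), mirroring the TEXT_GENERATION_EXAMPLE docstring above:
# loading an ONNX causal LM and generating through the cached-decoding path this class
# implements. "optimum/gpt2" is the checkpoint referenced in that docstring;
# max_new_tokens=20 is an arbitrary generation setting.
from transformers import AutoTokenizer

from optimum.onnxruntime import ORTModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("optimum/gpt2")
model = ORTModelForCausalLM.from_pretrained("optimum/gpt2")  # use_cache=True by default

inputs = tokenizer("My name is Arthur and I live in", return_tensors="pt")
generated = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))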
self.use_fp16 = False for inp in model.get_inputs(): if (inp.name == 'past_key_values' or inp.name in self.key_value_input_names) and inp.type == 'tensor(float16)': self.use_fp16 = True break model_type = config.model_type.replace('_', '-') if model_type in MODEL_TYPES_REQUIRING_POSITION_IDS and 'position_ids' not in self.input_names: logger.warning(f'ORTModelForCausalLM loaded a legacy ONNX model with no position_ids input, although this input is required for batched generation for the architecture {model_type}. We strongly encourage to re-export the model with optimum>=1.14 for position_ids and batched inference support.') if use_cache ^ self.use_cache: raise ValueError(f'`use_cache` was set to `{use_cache}` but the loaded model only supports `use_cache={self.use_cache}`. Please load your current model with `use_cache={self.use_cache}` or export the original model once again with `use_cache={use_cache}` when calling the `from_pretrained` method. To export your model, simply set `export=True`.') if use_io_binding and (not use_cache): raise ValueError('The parameters combination use_cache=False, use_io_binding=True is not supported. Please either pass use_cache=True, use_io_binding=True (default), or use_cache=False, use_io_binding=False.') @add_start_docstrings_to_model_forward(CAUSALLM_ONNX_MODEL_DOCSTRING.format('batch_size, sequence_length') + TEXT_GENERATION_EXAMPLE.format(processor_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForCausalLM', checkpoint='optimum/gpt2')) def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, labels: Optional[torch.LongTensor]=None, use_cache_branch: bool=None, **kwargs) -> CausalLMOutputWithPast: use_torch = isinstance(input_ids, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) known_output_shapes = {} use_cache_branch = None loss = None if self.use_cache: if past_key_values is not None: if self.model_type != 'gpt_bigcode': past_key_values = tuple((past_key_value for pkv_per_layer in past_key_values for past_key_value in pkv_per_layer)) (use_cache_branch, past_key_values, known_output_shapes) = self.prepare_past_key_values(input_ids, past_key_values, use_torch) if self.use_io_binding: model_inputs = [input_ids.contiguous()] if 'attention_mask' in self.input_names: model_inputs.append(attention_mask) if 'position_ids' in self.input_names: if position_ids is None: raise ValueError('position_ids was not passed but is a required input for this ONNX model.') model_inputs.append(position_ids.contiguous()) if past_key_values is not None: model_inputs += past_key_values if use_cache_branch is not None: model_inputs.append(use_cache_branch) if 'labels' in self.input_names: model_inputs.append(labels) known_output_shapes.update({'loss': []}) (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(*model_inputs, known_output_shapes=known_output_shapes, ordered_input_names=self._ordered_input_names) if self.device.type == 'cpu': self.model.run_with_iobinding(io_binding) else: io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() if self.use_cache: past_key_values = tuple((output_buffers[name].view(output_shapes[name]) for name in self.key_value_output_names)) logits = output_buffers['logits'].view(output_shapes['logits']) if 'loss' in self.output_names: loss = output_buffers['loss'].view(output_shapes['loss']) else: model_inputs = 
{'input_ids': input_ids, 'position_ids': position_ids, 'attention_mask': attention_mask, 'use_cache_branch': use_cache_branch, 'labels': labels} if past_key_values is not None: model_inputs.update(zip(self.key_value_input_names, past_key_values)) onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) loss = model_outputs.get('loss', None) logits = model_outputs['logits'] if self.use_cache: past_key_values = tuple((model_outputs[output_name] for output_name in self.key_value_output_names)) if self.use_cache and self.model_type != 'gpt_bigcode': past_key_values = tuple((past_key_values[i:i + self.num_pkv] for i in range(0, len(past_key_values), self.num_pkv))) return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=past_key_values) def prepare_past_key_values(self, input_ids: Union[None, torch.LongTensor, np.ndarray], past_key_values: Union[None, Tuple[torch.FloatTensor], Tuple[np.ndarray]], use_torch: bool): sequence_length = input_ids.shape[1] constructor = torch if use_torch else np if self.use_merged: use_cache_branch = constructor.full((1,), past_key_values is not None) else: use_cache_branch = None if use_torch and use_cache_branch is not None: use_cache_branch = use_cache_branch.to(self.device) pkv_output_shape = {} if past_key_values is None: batch_size = input_ids.shape[0] embed_size_per_head = self.normalized_config.hidden_size // self.normalized_config.num_attention_heads if self.model_type == 'gemma': num_attention_heads = self.normalized_config.num_key_value_heads embed_size_per_head = self.normalized_config.head_dim elif self.model_type in {'mistral', 'llama', 'qwen2'}: num_attention_heads = self.normalized_config.num_key_value_heads else: num_attention_heads = self.normalized_config.num_attention_heads dtype = constructor.float16 if self.use_fp16 else constructor.float32 if self.__class__.__name__ == 'ORTBloomForCausalLM': shape_value = (batch_size * num_attention_heads, 0, embed_size_per_head) shape_key = (batch_size * num_attention_heads, embed_size_per_head, 0) key = constructor.zeros(shape_key, dtype=dtype) value = constructor.zeros(shape_value, dtype=dtype) if use_torch: key = key.to(self.device) value = value.to(self.device) past_key_values = tuple((key_or_value for _ in range(len(self.key_value_input_names) // 2) for key_or_value in [key, value])) for (name, value) in zip(self.key_value_output_names, past_key_values): shape = [*value.shape] index = 1 if 'value' in name else 2 shape[index] += sequence_length pkv_output_shape[name] = shape elif self.model_type == 'gpt_bigcode': shape_key_and_value = (batch_size, 0, embed_size_per_head * 2) key_and_value = constructor.zeros(shape_key_and_value, dtype=dtype) if use_torch: key_and_value = key_and_value.to(self.device) past_key_values = tuple((key_and_value for _ in range(len(self.key_value_input_names)))) for (name, value) in zip(self.key_value_output_names, past_key_values): shape = [*value.shape] shape[1] += sequence_length pkv_output_shape[name] = shape else: num_key_value_heads = self.num_key_value_heads if self.model_type == 'falcon' else num_attention_heads shape = (batch_size, num_key_value_heads, 0, embed_size_per_head) key_or_value = constructor.zeros(shape, dtype=dtype) if use_torch: key_or_value = key_or_value.to(self.device) past_key_values = tuple((key_or_value for _ in range(len(self.key_value_input_names)))) for (name, value) in zip(self.key_value_output_names, 
past_key_values): shape = [*value.shape] shape[2] += sequence_length pkv_output_shape[name] = shape return (use_cache_branch, past_key_values, pkv_output_shape) @classmethod def _from_pretrained(cls, model_id: Union[str, Path], config: 'PretrainedConfig', use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, file_name: Optional[str]=None, subfolder: str='', use_cache: bool=True, local_files_only: bool=False, use_merged: Optional[bool]=None, provider: str='CPUExecutionProvider', session_options: Optional[onnxruntime.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, **kwargs) -> 'ORTModelForCausalLM': if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token model_path = Path(model_id) if use_cache is False: if use_merged is True: raise ValueError('The parameters combination use_cache=False, use_merged=True is not supported. To use a merged decoder, past key values must be used.') use_merged = False decoder_name = 'decoder_file_name' if use_cache else 'decoder_with_past_file_name' decoder_file_name = kwargs.pop(decoder_name, None) if decoder_file_name is not None: logger.warning(f'The `{decoder_name}` argument is deprecated, please use `file_name` instead.') file_name = file_name or decoder_file_name if file_name is None: decoder_path = None if use_merged is not False: try: decoder_path = ORTModelForCausalLM.infer_onnx_filename(model_id, [DECODER_MERGED_ONNX_FILE_PATTERN], argument_name=None, subfolder=subfolder, token=token, revision=revision) use_merged = True file_name = decoder_path.name except FileNotFoundError as e: if use_merged is True: raise FileNotFoundError(f'The parameter `use_merged=True` was passed to ORTModelForCausalLM.from_pretrained() but no ONNX file for a merged decoder could be found in {str(Path(model_id, subfolder))}, with the error: {e}') use_merged = False if use_merged is False: pattern = DECODER_WITH_PAST_ONNX_FILE_PATTERN if use_cache else DECODER_ONNX_FILE_PATTERN decoder_path = ORTModelForCausalLM.infer_onnx_filename(model_id, ['^((?!decoder).)*.onnx', pattern], argument_name=None, subfolder=subfolder, token=token, revision=revision) file_name = decoder_path.name if file_name == ONNX_DECODER_WITH_PAST_NAME and config.model_type in MODEL_TO_PATCH_FOR_PAST: raise ValueError(f'ONNX Runtime inference using {ONNX_DECODER_WITH_PAST_NAME} has been deprecated for {config.model_type} architecture. Please re-export your model with optimum>=1.14.0 or set use_cache=False. 
For details about the deprecation, please refer to https://github.com/huggingface/optimum/releases/tag/v1.14.0.') regular_file_names = [] for name in [ONNX_WEIGHTS_NAME, ONNX_DECODER_WITH_PAST_NAME if use_cache else ONNX_DECODER_NAME]: regular_file_names += ORTModelForCausalLM._generate_regular_names_for_filename(name) if file_name not in regular_file_names: logger.warning(f'The ONNX file {file_name} is not a regular name used in optimum.onnxruntime that are {regular_file_names}, the {cls.__name__} might not behave as expected.') (model_cache_path, preprocessors) = cls._cached_file(model_path=model_path, token=token, revision=revision, force_download=force_download, cache_dir=cache_dir, file_name=file_name, subfolder=subfolder, local_files_only=local_files_only) new_model_save_dir = model_cache_path.parent if model_save_dir is None: model_save_dir = new_model_save_dir onnx_model = onnx.load(str(model_cache_path), load_external_data=False) model_uses_external_data = check_model_uses_external_data(onnx_model) if model_uses_external_data: onnx_model = onnx.load(str(model_cache_path), load_external_data=True) input_dims = {node.name: [dim.dim_value or dim.dim_param for dim in node.type.tensor_type.shape.dim] for node in onnx_model.graph.input} output_dims = {node.name: [dim.dim_value or dim.dim_param for dim in node.type.tensor_type.shape.dim] for node in onnx_model.graph.output} override_dims = False if input_dims['input_ids'][1] == 1: input_dims['input_ids'][1] = 'sequence_length' output_dims['logits'][1] = 'sequence_length' override_dims = True for input_name in input_dims.keys(): if 'past' in input_name and input_dims[input_name][2] == 'past_sequence_length + sequence_length': input_dims[input_name][2] = 'past_sequence_length' override_dims = True if override_dims: logger.warning('The ONNX model was probably exported with an older version of optimum. We are updating the input/output dimensions and overwriting the model file with new dimensions. This is necessary for the model to work correctly with the current version of optimum. 
If you encounter any issues, please re-export the model with the latest version of optimum for optimal performance.') onnx_model = update_model_dims.update_inputs_outputs_dims(onnx_model, input_dims, output_dims) onnx.save(onnx_model, str(model_cache_path), save_as_external_data=model_uses_external_data, location=model_cache_path.name + '_data', all_tensors_to_one_file=True, convert_attribute=True, size_threshold=0) use_old_bloom_modeling = not check_if_transformers_greater('4.44') for input_name in input_dims.keys(): if input_dims[input_name][0] == 'batch_size x num_heads': use_old_bloom_modeling = True del onnx_model model = ORTModel.load_model(model_cache_path, provider=provider, session_options=session_options, provider_options=provider_options) if config.model_type == 'bloom' and use_old_bloom_modeling: init_cls = ORTBloomForCausalLM elif config.model_type == 'falcon': init_cls = ORTFalconForCausalLM elif config.model_type == 'mpt': init_cls = ORTMPTForCausalLM elif config.model_type == 'opt': init_cls = ORTOPTForCausalLM elif config.model_type == 'gpt_bigcode': init_cls = ORTGPTBigCodeForCausalLM else: init_cls = ORTModelForCausalLM return init_cls(model=model, config=config, use_io_binding=use_io_binding, model_save_dir=model_save_dir, preprocessors=preprocessors, use_cache=use_cache) @classmethod def _from_transformers(cls, model_id: str, config: 'PretrainedConfig', use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: str='main', force_download: bool=True, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', local_files_only: bool=False, trust_remote_code: bool=False, use_cache: bool=True, use_merged: bool=False, provider: str='CPUExecutionProvider', session_options: Optional[onnxruntime.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, task: Optional[str]=None) -> 'ORTModelForCausalLM': if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token file_name = ONNX_WEIGHTS_NAME if use_merged: logger.warning('The `use_merged` argument is deprecated when the model is exported, and not used anymore.') use_merged = False if task is None: task = cls._auto_model_to_task(cls.auto_model_class) if use_cache: task += '-with-past' save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) main_export(model_name_or_path=model_id, output=save_dir_path, task=task, do_validation=False, no_post_process=False, legacy=False, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code) config.save_pretrained(save_dir_path) maybe_save_preprocessors(model_id, save_dir_path, src_subfolder=subfolder) return cls._from_pretrained(save_dir_path, config, use_cache=use_cache, use_merged=use_merged, provider=provider, session_options=session_options, provider_options=provider_options, use_io_binding=use_io_binding, model_save_dir=save_dir, file_name=file_name) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] attention_mask = kwargs.get('attention_mask', None) use_cache = kwargs.get('use_cache', None) position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) return {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'position_ids': position_ids, 'attention_mask': attention_mask} @staticmethod def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past)) class ORTGPTBigCodeForCausalLM(ORTModelForCausalLM): def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): if past_key_values: if self.config.multi_query: past_length = past_key_values[0].shape[1] else: past_length = past_key_values[0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] attention_mask = kwargs.get('attention_mask', None) position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] else: position_ids = None model_inputs = {'input_ids': input_ids} model_inputs.update({'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'attention_mask': attention_mask}) return model_inputs @staticmethod def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return 
tuple((layer_past.index_select(0, beam_idx.to(layer_past.device)) for layer_past in past_key_values)) class ORTBloomForCausalLM(ORTModelForCausalLM): def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] attention_mask = kwargs.get('attention_mask', None) use_cache = kwargs.get('use_cache', None) if past_key_values: if past_key_values[0][0].shape[0] == input_ids.shape[0]: past_key_values = bloom_convert_to_bloom_cache(past_key_values) return {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'position_ids': None, 'attention_mask': attention_mask} @staticmethod def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: standardized_past = bloom_convert_to_standard_cache(past, batch_size=len(beam_idx)) device_to_beam_idx = {past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past} reordered_past = tuple(((layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]), layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device])) for layer_past in standardized_past)) return bloom_convert_to_bloom_cache(reordered_past) class ORTOPTForCausalLM(ORTModelForCausalLM): def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] attention_mask = kwargs.get('attention_mask', None) use_cache = kwargs.get('use_cache', None) return {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'position_ids': None, 'attention_mask': attention_mask} class ORTMPTForCausalLM(ORTModelForCausalLM): def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] attention_mask = kwargs.get('attention_mask', None) use_cache = kwargs.get('use_cache', None) return {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'position_ids': None, 'attention_mask': attention_mask} class ORTFalconForCausalLM(ORTModelForCausalLM): def __init__(self, model: onnxruntime.InferenceSession, config: 'PretrainedConfig', use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, preprocessors: Optional[List]=None, generation_config: Optional[GenerationConfig]=None, use_cache: Optional[bool]=None, **kwargs): super().__init__(model=model, config=config, use_io_binding=use_io_binding, model_save_dir=model_save_dir, preprocessors=preprocessors, generation_config=generation_config, use_cache=use_cache, **kwargs) self.num_key_value_heads = config.num_kv_heads if config.new_decoder_architecture or not config.multi_query else 1 self.use_alibi = config.alibi def _reorder_cache(self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor) -> Tuple[Tuple[torch.Tensor, 
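# Worked example of the cache reordering performed by _reorder_cache during beam
# search: the batch dimension of every cached tensor is re-indexed with beam_idx.
# Shapes are toy values chosen for illustration.
import torch

past_state = torch.arange(12.0).reshape(3, 1, 2, 2)       # (batch, heads, seq, head_dim)
beam_idx = torch.tensor([2, 0, 0])                        # beam 0 now continues hypothesis 2, etc.
reordered = past_state.index_select(0, beam_idx)
assert torch.equal(reordered[0], past_state[2])
assert torch.equal(reordered[1], past_state[0])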
torch.Tensor], ...]: device_to_beam_idx = {past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past} reordered_past = tuple(((layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]), layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device])) for layer_past in past)) return reordered_past def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, past_key_values: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, **kwargs) -> dict: if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if not self.use_alibi and attention_mask is not None and (position_ids is None): position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] return {'input_ids': input_ids, 'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'attention_mask': attention_mask} # File: optimum-main/optimum/onnxruntime/modeling_diffusion.py import importlib import logging import os import shutil import warnings from abc import abstractmethod from pathlib import Path from tempfile import TemporaryDirectory from typing import Any, Dict, Optional, Union import numpy as np import torch from diffusers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, StableDiffusionXLImg2ImgPipeline from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME from diffusers.utils import CONFIG_NAME, is_invisible_watermark_available from huggingface_hub import snapshot_download from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from transformers import CLIPFeatureExtractor, CLIPTokenizer from transformers.file_utils import add_end_docstrings import onnxruntime as ort from ..exporters.onnx import main_export from ..onnx.utils import _get_external_data_paths from ..pipelines.diffusers.pipeline_latent_consistency import LatentConsistencyPipelineMixin from ..pipelines.diffusers.pipeline_stable_diffusion import StableDiffusionPipelineMixin from ..pipelines.diffusers.pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipelineMixin from ..pipelines.diffusers.pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipelineMixin from ..pipelines.diffusers.pipeline_stable_diffusion_xl import StableDiffusionXLPipelineMixin from ..pipelines.diffusers.pipeline_stable_diffusion_xl_img2img import StableDiffusionXLImg2ImgPipelineMixin from ..pipelines.diffusers.pipeline_utils import VaeImageProcessor from ..utils import DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER, DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER, DIFFUSION_MODEL_UNET_SUBFOLDER, DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER, DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER from .modeling_ort import ONNX_MODEL_END_DOCSTRING, ORTModel from .utils import _ORT_TO_NP_TYPE, ONNX_WEIGHTS_NAME, get_provider_for_device, parse_device, validate_provider_availability logger = logging.getLogger(__name__) class ORTStableDiffusionPipelineBase(ORTModel): auto_model_class = StableDiffusionPipeline main_input_name = 'input_ids' base_model_prefix = 'onnx_model' config_name = 'model_index.json' sub_component_config_name = 'config.json' def __init__(self, vae_decoder_session: 
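# Worked example of the input trimming shared by the prepare_inputs_for_generation
# methods above: once a KV cache of length past_length exists, only the tokens the
# cache does not yet cover are fed to the decoder.
import torch

past_length = 4
input_ids = torch.arange(6).unsqueeze(0)                  # shape (1, 6): prompt + generated ids
if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length
else:
    remove_prefix_length = input_ids.shape[1] - 1
print(input_ids[:, remove_prefix_length:])                # tensor([[4, 5]])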
ort.InferenceSession, text_encoder_session: ort.InferenceSession, unet_session: ort.InferenceSession, config: Dict[str, Any], tokenizer: CLIPTokenizer, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], feature_extractor: Optional[CLIPFeatureExtractor]=None, vae_encoder_session: Optional[ort.InferenceSession]=None, text_encoder_2_session: Optional[ort.InferenceSession]=None, tokenizer_2: Optional[CLIPTokenizer]=None, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None): self.shared_attributes_init(vae_decoder_session, use_io_binding=use_io_binding, model_save_dir=model_save_dir) self._internal_dict = config self.vae_decoder = ORTModelVaeDecoder(vae_decoder_session, self) self.vae_decoder_model_path = Path(vae_decoder_session._model_path) self.unet = ORTModelUnet(unet_session, self) self.unet_model_path = Path(unet_session._model_path) if text_encoder_session is not None: self.text_encoder_model_path = Path(text_encoder_session._model_path) self.text_encoder = ORTModelTextEncoder(text_encoder_session, self) else: self.text_encoder_model_path = None self.text_encoder = None if vae_encoder_session is not None: self.vae_encoder_model_path = Path(vae_encoder_session._model_path) self.vae_encoder = ORTModelVaeEncoder(vae_encoder_session, self) else: self.vae_encoder_model_path = None self.vae_encoder = None if text_encoder_2_session is not None: self.text_encoder_2_model_path = Path(text_encoder_2_session._model_path) self.text_encoder_2 = ORTModelTextEncoder(text_encoder_2_session, self) else: self.text_encoder_2_model_path = None self.text_encoder_2 = None self.tokenizer = tokenizer self.tokenizer_2 = tokenizer_2 self.scheduler = scheduler self.feature_extractor = feature_extractor self.safety_checker = None sub_models = {DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER: self.text_encoder, DIFFUSION_MODEL_UNET_SUBFOLDER: self.unet, DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER: self.vae_decoder, DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER: self.vae_encoder, DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER: self.text_encoder_2} for name in sub_models.keys(): self._internal_dict[name] = ('diffusers', 'OnnxRuntimeModel') if sub_models[name] is not None else (None, None) self._internal_dict.pop('vae', None) if 'block_out_channels' in self.vae_decoder.config: self.vae_scale_factor = 2 ** (len(self.vae_decoder.config['block_out_channels']) - 1) else: self.vae_scale_factor = 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) @staticmethod def load_model(vae_decoder_path: Union[str, Path], text_encoder_path: Union[str, Path], unet_path: Union[str, Path], vae_encoder_path: Optional[Union[str, Path]]=None, text_encoder_2_path: Optional[Union[str, Path]]=None, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict]=None): vae_decoder = ORTModel.load_model(vae_decoder_path, provider, session_options, provider_options) unet = ORTModel.load_model(unet_path, provider, session_options, provider_options) sessions = {'vae_encoder': vae_encoder_path, 'text_encoder': text_encoder_path, 'text_encoder_2': text_encoder_2_path} for (key, value) in sessions.items(): if value is not None and value.is_file(): sessions[key] = ORTModel.load_model(value, provider, session_options, provider_options) else: sessions[key] = None return (vae_decoder, sessions['text_encoder'], unet, sessions['vae_encoder'], sessions['text_encoder_2']) def _save_pretrained(self, save_directory: Union[str, 
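# Worked example of the VAE scale factor computed in __init__ above: with the usual
# four down-blocks the latent space is 8x smaller than the image. The channel list
# is an assumed, typical SD 1.x value, not read from any checkpoint.
block_out_channels = [128, 256, 512, 512]
vae_scale_factor = 2 ** (len(block_out_channels) - 1)
print(vae_scale_factor)                                   # 8 -> a 512x512 image uses a 64x64 latent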
Path]): save_directory = Path(save_directory) src_to_dst_path = {self.vae_decoder_model_path: save_directory / DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER / ONNX_WEIGHTS_NAME, self.text_encoder_model_path: save_directory / DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER / ONNX_WEIGHTS_NAME, self.unet_model_path: save_directory / DIFFUSION_MODEL_UNET_SUBFOLDER / ONNX_WEIGHTS_NAME} sub_models_to_save = {self.vae_encoder_model_path: DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER, self.text_encoder_2_model_path: DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER} for (path, subfolder) in sub_models_to_save.items(): if path is not None: src_to_dst_path[path] = save_directory / subfolder / ONNX_WEIGHTS_NAME src_paths = list(src_to_dst_path.keys()) dst_paths = list(src_to_dst_path.values()) (src_paths, dst_paths) = _get_external_data_paths(src_paths, dst_paths) for (src_path, dst_path) in zip(src_paths, dst_paths): dst_path.parent.mkdir(parents=True, exist_ok=True) shutil.copyfile(src_path, dst_path) config_path = src_path.parent / self.sub_component_config_name if config_path.is_file(): shutil.copyfile(config_path, dst_path.parent / self.sub_component_config_name) self.scheduler.save_pretrained(save_directory / 'scheduler') if self.feature_extractor is not None: self.feature_extractor.save_pretrained(save_directory / 'feature_extractor') if self.tokenizer is not None: self.tokenizer.save_pretrained(save_directory / 'tokenizer') if self.tokenizer_2 is not None: self.tokenizer_2.save_pretrained(save_directory / 'tokenizer_2') @classmethod def _from_pretrained(cls, model_id: Union[str, Path], config: Dict[str, Any], use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, vae_decoder_file_name: str=ONNX_WEIGHTS_NAME, text_encoder_file_name: str=ONNX_WEIGHTS_NAME, unet_file_name: str=ONNX_WEIGHTS_NAME, vae_encoder_file_name: str=ONNX_WEIGHTS_NAME, text_encoder_2_file_name: str=ONNX_WEIGHTS_NAME, local_files_only: bool=False, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, **kwargs): if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token if provider == 'TensorrtExecutionProvider': raise ValueError("The provider `'TensorrtExecutionProvider'` is not supported") model_id = str(model_id) patterns = set(config.keys()) sub_models_to_load = patterns.intersection({'feature_extractor', 'tokenizer', 'tokenizer_2', 'scheduler'}) if not os.path.isdir(model_id): patterns.update({'vae_encoder', 'vae_decoder'}) allow_patterns = {os.path.join(k, '*') for k in patterns if not k.startswith('_')} allow_patterns.update({vae_decoder_file_name, text_encoder_file_name, unet_file_name, vae_encoder_file_name, text_encoder_2_file_name, SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name}) model_id = snapshot_download(model_id, cache_dir=cache_dir, local_files_only=local_files_only, token=token, revision=revision, allow_patterns=allow_patterns, ignore_patterns=['*.msgpack', '*.safetensors', '*.bin', '*.xml']) new_model_save_dir = Path(model_id) sub_models = {} for name in sub_models_to_load: (library_name, library_classes) = config[name] if library_classes is not None: library = importlib.import_module(library_name) class_obj = getattr(library, library_classes) load_method = getattr(class_obj, 'from_pretrained') if (new_model_save_dir / name).is_dir(): sub_models[name] = load_method(new_model_save_dir / name) else: sub_models[name] = load_method(new_model_save_dir) (vae_decoder, text_encoder, unet, vae_encoder, text_encoder_2) = cls.load_model(vae_decoder_path=new_model_save_dir / DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER / vae_decoder_file_name, text_encoder_path=new_model_save_dir / DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER / text_encoder_file_name, unet_path=new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name, vae_encoder_path=new_model_save_dir / DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER / vae_encoder_file_name, text_encoder_2_path=new_model_save_dir / DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER / text_encoder_2_file_name, provider=provider, session_options=session_options, provider_options=provider_options) if model_save_dir is None: model_save_dir = new_model_save_dir if use_io_binding: raise ValueError('IOBinding is not yet available for stable diffusion model, please set `use_io_binding` to False.') return cls(vae_decoder_session=vae_decoder, text_encoder_session=text_encoder, unet_session=unet, config=config, tokenizer=sub_models.get('tokenizer', None), scheduler=sub_models.get('scheduler'), feature_extractor=sub_models.get('feature_extractor', None), tokenizer_2=sub_models.get('tokenizer_2', None), vae_encoder_session=vae_encoder, text_encoder_2_session=text_encoder_2, use_io_binding=use_io_binding, model_save_dir=model_save_dir) @classmethod def _from_transformers(cls, model_id: str, config: Optional[str]=None, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: str='main', force_download: bool=True, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', local_files_only: bool=False, trust_remote_code: bool=False, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, task: Optional[str]=None) -> 'ORTStableDiffusionPipeline': if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. 
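# Sketch of the sub-model loading pattern used in _from_pretrained above: each entry
# of model_index.json names a (library, class) pair that is imported dynamically and
# loaded with its own from_pretrained. The entry value here is an illustrative example.
import importlib

library_name, class_name = ("transformers", "CLIPTokenizer")
cls = getattr(importlib.import_module(library_name), class_name)
# tokenizer = cls.from_pretrained(model_dir / "tokenizer")   # model_dir: the downloaded snapshot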
Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token if task is None: task = cls._auto_model_to_task(cls.auto_model_class) save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) main_export(model_name_or_path=model_id, output=save_dir_path, task=task, do_validation=False, no_post_process=True, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code) return cls._from_pretrained(save_dir_path, config=config, provider=provider, session_options=session_options, provider_options=provider_options, use_io_binding=use_io_binding, model_save_dir=save_dir) def to(self, device: Union[torch.device, str, int]): (device, provider_options) = parse_device(device) provider = get_provider_for_device(device) validate_provider_availability(provider) if device.type == 'cuda' and self.providers[0] == 'TensorrtExecutionProvider': return self self.vae_decoder.session.set_providers([provider], provider_options=[provider_options]) self.text_encoder.session.set_providers([provider], provider_options=[provider_options]) self.unet.session.set_providers([provider], provider_options=[provider_options]) if self.vae_encoder is not None: self.vae_encoder.session.set_providers([provider], provider_options=[provider_options]) self.providers = self.vae_decoder.session.get_providers() self._device = device return self @classmethod def _load_config(cls, config_name_or_path: Union[str, os.PathLike], **kwargs): return cls.load_config(config_name_or_path, **kwargs) def _save_config(self, save_directory): self.save_config(save_directory) class _ORTDiffusionModelPart: CONFIG_NAME = 'config.json' def __init__(self, session: ort.InferenceSession, parent_model: ORTModel): self.session = session self.parent_model = parent_model self.input_names = {input_key.name: idx for (idx, input_key) in enumerate(self.session.get_inputs())} self.output_names = {output_key.name: idx for (idx, output_key) in enumerate(self.session.get_outputs())} config_path = Path(session._model_path).parent / self.CONFIG_NAME self.config = self.parent_model._dict_from_json_file(config_path) if config_path.is_file() else {} self.input_dtype = {inputs.name: _ORT_TO_NP_TYPE[inputs.type] for inputs in self.session.get_inputs()} @property def device(self): return self.parent_model.device @abstractmethod def forward(self, *args, **kwargs): pass def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) class ORTModelTextEncoder(_ORTDiffusionModelPart): def forward(self, input_ids: np.ndarray): onnx_inputs = {'input_ids': input_ids} outputs = self.session.run(None, onnx_inputs) return outputs class ORTModelUnet(_ORTDiffusionModelPart): def __init__(self, session: ort.InferenceSession, parent_model: ORTModel): super().__init__(session, parent_model) def forward(self, sample: np.ndarray, timestep: np.ndarray, encoder_hidden_states: np.ndarray, text_embeds: Optional[np.ndarray]=None, time_ids: Optional[np.ndarray]=None, timestep_cond: Optional[np.ndarray]=None): onnx_inputs = {'sample': sample, 'timestep': timestep, 'encoder_hidden_states': encoder_hidden_states} if text_embeds is not None: onnx_inputs['text_embeds'] = text_embeds if time_ids is not None: onnx_inputs['time_ids'] = time_ids if timestep_cond is not None: onnx_inputs['timestep_cond'] = timestep_cond outputs = 
self.session.run(None, onnx_inputs) return outputs class ORTModelVaeDecoder(_ORTDiffusionModelPart): def forward(self, latent_sample: np.ndarray): onnx_inputs = {'latent_sample': latent_sample} outputs = self.session.run(None, onnx_inputs) return outputs class ORTModelVaeEncoder(_ORTDiffusionModelPart): def forward(self, sample: np.ndarray): onnx_inputs = {'sample': sample} outputs = self.session.run(None, onnx_inputs) return outputs @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTStableDiffusionPipeline(ORTStableDiffusionPipelineBase, StableDiffusionPipelineMixin): __call__ = StableDiffusionPipelineMixin.__call__ @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTStableDiffusionImg2ImgPipeline(ORTStableDiffusionPipelineBase, StableDiffusionImg2ImgPipelineMixin): __call__ = StableDiffusionImg2ImgPipelineMixin.__call__ @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTStableDiffusionInpaintPipeline(ORTStableDiffusionPipelineBase, StableDiffusionInpaintPipelineMixin): __call__ = StableDiffusionInpaintPipelineMixin.__call__ @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTLatentConsistencyModelPipeline(ORTStableDiffusionPipelineBase, LatentConsistencyPipelineMixin): __call__ = LatentConsistencyPipelineMixin.__call__ class ORTStableDiffusionXLPipelineBase(ORTStableDiffusionPipelineBase): auto_model_class = StableDiffusionXLImg2ImgPipeline def __init__(self, vae_decoder_session: ort.InferenceSession, text_encoder_session: ort.InferenceSession, unet_session: ort.InferenceSession, config: Dict[str, Any], tokenizer: CLIPTokenizer, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], feature_extractor: Optional[CLIPFeatureExtractor]=None, vae_encoder_session: Optional[ort.InferenceSession]=None, text_encoder_2_session: Optional[ort.InferenceSession]=None, tokenizer_2: Optional[CLIPTokenizer]=None, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, add_watermarker: Optional[bool]=None): super().__init__(vae_decoder_session=vae_decoder_session, text_encoder_session=text_encoder_session, unet_session=unet_session, config=config, tokenizer=tokenizer, scheduler=scheduler, feature_extractor=feature_extractor, vae_encoder_session=vae_encoder_session, text_encoder_2_session=text_encoder_2_session, tokenizer_2=tokenizer_2, use_io_binding=use_io_binding, model_save_dir=model_save_dir) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: if not is_invisible_watermark_available(): raise ImportError('`add_watermarker` requires invisible-watermark to be installed, which can be installed with `pip install invisible-watermark`.') from ..pipelines.diffusers.watermark import StableDiffusionXLWatermarker self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTStableDiffusionXLPipeline(ORTStableDiffusionXLPipelineBase, StableDiffusionXLPipelineMixin): __call__ = StableDiffusionXLPipelineMixin.__call__ @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTStableDiffusionXLImg2ImgPipeline(ORTStableDiffusionXLPipelineBase, StableDiffusionXLImg2ImgPipelineMixin): __call__ = StableDiffusionXLImg2ImgPipelineMixin.__call__ # File: optimum-main/optimum/onnxruntime/modeling_ort.py """""" import logging import re import shutil import warnings from pathlib import Path from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union import 
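# A minimal end-to-end sketch for the ONNX Stable Diffusion pipeline defined above,
# assuming a diffusers-format checkpoint; the model id is an example and the first
# call exports all sub-models to ONNX, which can take several minutes.
from optimum.onnxruntime import ORTStableDiffusionPipeline

pipeline = ORTStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", export=True)
image = pipeline("sailing ship in a storm, oil painting").images[0]
image.save("ship.png")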
numpy as np import torch from huggingface_hub import hf_hub_download from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from huggingface_hub.utils import EntryNotFoundError from transformers import AutoConfig, AutoModel, AutoModelForAudioClassification, AutoModelForAudioFrameClassification, AutoModelForAudioXVector, AutoModelForCTC, AutoModelForImageClassification, AutoModelForMaskedLM, AutoModelForMultipleChoice, AutoModelForQuestionAnswering, AutoModelForSemanticSegmentation, AutoModelForSequenceClassification, AutoModelForTokenClassification, GenerationMixin from transformers.file_utils import add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from transformers.modeling_outputs import BaseModelOutput, CausalLMOutput, ImageClassifierOutput, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SemanticSegmenterOutput, SequenceClassifierOutput, TokenClassifierOutput, XVectorOutput import onnxruntime as ort from ..exporters import TasksManager from ..exporters.onnx import main_export from ..modeling_base import FROM_PRETRAINED_START_DOCSTRING, OptimizedModel from ..onnx.utils import _get_external_data_paths from ..utils.file_utils import find_files_matching_pattern from ..utils.save_utils import maybe_load_preprocessors, maybe_save_preprocessors from .io_binding import IOBindingHelper, TypeHelper from .utils import ONNX_WEIGHTS_NAME, check_io_binding, get_device_for_provider, get_ordered_input_names, get_provider_for_device, parse_device, validate_provider_availability if TYPE_CHECKING: from transformers import PretrainedConfig logger = logging.getLogger(__name__) _TOKENIZER_FOR_DOC = 'AutoTokenizer' _FEATURE_EXTRACTOR_FOR_DOC = 'AutoFeatureExtractor' _PROCESSOR_FOR_DOC = 'AutoProcessor' ONNX_MODEL_END_DOCSTRING = '\n This model inherits from [`~onnxruntime.modeling_ort.ORTModel`], check its documentation for the generic methods the\n library implements for all its model (such as downloading or saving).\n\n This class should be initialized using the [`onnxruntime.modeling_ort.ORTModel.from_pretrained`] method.\n' ONNX_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`Union[torch.Tensor, np.ndarray, None]` of shape `({0})`, defaults to `None`):\n Indices of input sequence tokens in the vocabulary.\n Indices can be obtained using [`AutoTokenizer`](https://huggingface.co/docs/transformers/autoclass_tutorial#autotokenizer).\n See [`PreTrainedTokenizer.encode`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizerBase.encode) and\n [`PreTrainedTokenizer.__call__`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizerBase.__call__) for details.\n [What are input IDs?](https://huggingface.co/docs/transformers/glossary#input-ids)\n attention_mask (`Union[torch.Tensor, np.ndarray, None]` of shape `({0})`, defaults to `None`):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](https://huggingface.co/docs/transformers/glossary#attention-mask)\n token_type_ids (`Union[torch.Tensor, np.ndarray, None]` of shape `({0})`, defaults to `None`):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0, 1]`:\n - 1 for tokens that are **sentence A**,\n - 0 for tokens that are **sentence B**.\n [What are token type IDs?](https://huggingface.co/docs/transformers/glossary#token-type-ids)\n' ONNX_IMAGE_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`Union[torch.Tensor, np.ndarray, None]` of shape `({0})`, defaults to `None`):\n Pixel values corresponding to the images in the current batch.\n Pixel values can be obtained from encoded images using [`AutoFeatureExtractor`](https://huggingface.co/docs/transformers/autoclass_tutorial#autofeatureextractor).\n' ONNX_AUDIO_INPUTS_DOCSTRING = '\n Args:\n input_values (`torch.Tensor` of shape `({0})`):\n Float values of input raw speech waveform..\n Input values can be obtained from audio file loaded into an array using [`AutoFeatureExtractor`](https://huggingface.co/docs/transformers/autoclass_tutorial#autofeatureextractor).\n' class classproperty: def __init__(self, getter): self.getter = getter def __get__(self, instance, owner): return self.getter(owner) class ORTModel(OptimizedModel): model_type = 'onnx_model' auto_model_class = AutoModel @classproperty def export_feature(cls): logger.warning(f'{cls.__name__}.export_feature is deprecated, and will be removed in optimum 2.0.') try: feature = TasksManager.infer_task_from_model(cls.auto_model_class) except ValueError: feature = None return feature @classmethod def _auto_model_to_task(cls, auto_model_class): return TasksManager.infer_task_from_model(auto_model_class) def shared_attributes_init(self, model: ort.InferenceSession, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, preprocessors: Optional[List]=None, **kwargs): if kwargs.pop('latest_model_name', None) is not None: logger.warning(f'The latest_model_name argument to create an {self.__class__.__name__} is deprecated, and not used anymore.') if kwargs: raise ValueError(f"{self.__class__.__name__} received {', '.join(kwargs.keys())}, but do not accept those arguments.") self.providers = model.get_providers() self._device = get_device_for_provider(self.providers[0], provider_options=model.get_provider_options()[self.providers[0]]) self._model_save_dir_tempdirectory_instance = None if model_save_dir is None: self.model_save_dir = Path(model._model_path).parent elif isinstance(model_save_dir, TemporaryDirectory): self._model_save_dir_tempdirectory_instance = model_save_dir self.model_save_dir = Path(model_save_dir.name) elif isinstance(model_save_dir, str): self.model_save_dir = Path(model_save_dir) else: self.model_save_dir = model_save_dir self.preprocessors = preprocessors if preprocessors is not None else [] if self._device is None: logger.warning(f'ORTModel outputs will be sent to CPU as the device could not be inferred from the execution provider {self.providers[0]}. 
Use `ort_model.to()` to send the outputs to the wanted device.') self._use_io_binding = use_io_binding AutoConfig.register(self.model_type, AutoConfig) if hasattr(self.auto_model_class, 'register'): self.auto_model_class.register(AutoConfig, self.__class__) self.output_shape_inference_pattern = re.compile('([a-zA-Z_]+)|([0-9]+)|([+-/*])|([\\(\\)])') def __init__(self, model: ort.InferenceSession, config: 'PretrainedConfig', use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, preprocessors: Optional[List]=None, **kwargs): super().__init__(model, config) if use_io_binding is None: if model.get_providers()[0] == 'CUDAExecutionProvider': use_io_binding = True else: use_io_binding = False self.model_path = Path(model._model_path) self.model_name = self.model_path.name self.shared_attributes_init(model, use_io_binding, model_save_dir, preprocessors, **kwargs) self.input_names = {input_key.name: idx for (idx, input_key) in enumerate(model.get_inputs())} self.input_dtypes = {input_key.name: input_key.type for input_key in model.get_inputs()} self.output_names = {output_key.name: idx for (idx, output_key) in enumerate(model.get_outputs())} self.output_dtypes = {output_key.name: output_key.type for output_key in model.get_outputs()} self._ordered_input_names = get_ordered_input_names(self.input_names.keys(), func=self.forward) @property def dtype(self) -> torch.dtype: for dtype in self.input_dtypes.values(): torch_dtype = TypeHelper.ort_type_to_torch_type(dtype) if torch_dtype.is_floating_point: return torch_dtype for dtype in self.output_dtypes.values(): torch_dtype = TypeHelper.ort_type_to_torch_type(dtype) if torch_dtype.is_floating_point: return torch_dtype return None @property def device(self) -> torch.device: return self._device @device.setter def device(self, **kwargs): raise AttributeError('The device attribute is read-only, please use the `to` method to change the device.') @property def use_io_binding(self): return check_io_binding(self.providers, self._use_io_binding) @use_io_binding.setter def use_io_binding(self, value: bool): self._use_io_binding = value def to(self, device: Union[torch.device, str, int]): (device, provider_options) = parse_device(device) if device.type == 'cuda' and self.providers[0] == 'TensorrtExecutionProvider': return self provider = get_provider_for_device(device) validate_provider_availability(provider) if device.type == 'cuda' and self._use_io_binding is False and (provider == 'CUDAExecutionProvider'): self.use_io_binding = True logger.info('use_io_binding was set to False, setting it to True because it can provide a huge speedup on GPUs. 
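# Sketch of device placement with the to() method above, assuming the onnxruntime-gpu
# package and a CUDA device are available; the checkpoint id is an example.
from optimum.onnxruntime import ORTModelForSequenceClassification

ort_model = ORTModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english", export=True
)
ort_model = ort_model.to("cuda")
print(ort_model.providers)          # e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider']
print(ort_model.use_io_binding)     # True: IO binding is enabled automatically on CUDA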
It is possible to disable this feature manually by setting the use_io_binding attribute back to False.') if provider == 'ROCMExecutionProvider': self.use_io_binding = False self.model.set_providers([provider], provider_options=[provider_options]) self.providers = self.model.get_providers() self._device = device return self def forward(self, *args, **kwargs): raise NotImplementedError @staticmethod def load_model(path: Union[str, Path], provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None) -> ort.InferenceSession: validate_provider_availability(provider) providers = [provider] if provider == 'TensorrtExecutionProvider': providers.append('CUDAExecutionProvider') if not isinstance(path, str): path = str(path) if provider_options is not None: providers_options = [provider_options] + [{} for _ in range(len(providers) - 1)] else: providers_options = None return ort.InferenceSession(path, providers=providers, sess_options=session_options, provider_options=providers_options) def _save_pretrained(self, save_directory: Union[str, Path]): src_paths = [self.model_path] dst_paths = [Path(save_directory) / self.model_path.name] (src_paths, dst_paths) = _get_external_data_paths(src_paths, dst_paths) for (src_path, dst_path) in zip(src_paths, dst_paths): shutil.copyfile(src_path, dst_path) @staticmethod def _generate_regular_names_for_filename(filename: str): (name, extension) = filename.rsplit('.', maxsplit=1) return [filename, f'{name}_quantized.{extension}', f'{name}_optimized.{extension}'] @staticmethod def infer_onnx_filename(model_name_or_path: Union[str, Path], patterns: List[str], argument_name: str, subfolder: str='', use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, fail_if_not_found: bool=True) -> str: if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. 
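# Sketch of the load_model helper defined above: it validates the requested execution
# provider and returns a plain onnxruntime.InferenceSession. The path "model.onnx" is
# a hypothetical local file used only for illustration.
from optimum.onnxruntime import ORTModel

session = ORTModel.load_model("model.onnx", provider="CPUExecutionProvider")
print(session.get_providers())      # ['CPUExecutionProvider']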
Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token onnx_files = [] for pattern in patterns: onnx_files = find_files_matching_pattern(model_name_or_path, pattern, glob_pattern='**/*.onnx', subfolder=subfolder, token=token, revision=revision) if onnx_files: break path = model_name_or_path if subfolder != '': path = f'{path}/{subfolder}' if len(onnx_files) == 0: if fail_if_not_found: raise FileNotFoundError(f'Could not find any ONNX model file for the regex {patterns} in {path}.') return None elif len(onnx_files) > 1: if argument_name is not None: raise RuntimeError(f'Too many ONNX model files were found in {path}, specify which one to load by using the {argument_name} argument.') return onnx_files[0] @classmethod def _from_pretrained(cls, model_id: Union[str, Path], config: 'PretrainedConfig', use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, file_name: Optional[str]=None, subfolder: str='', local_files_only: bool=False, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, **kwargs) -> 'ORTModel': if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token model_path = Path(model_id) regular_onnx_filenames = ORTModel._generate_regular_names_for_filename(ONNX_WEIGHTS_NAME) if file_name is None: if model_path.is_dir(): onnx_files = list(model_path.glob('*.onnx')) else: (repo_files, _) = TasksManager.get_model_files(model_id, revision=revision, cache_dir=cache_dir, token=token) repo_files = map(Path, repo_files) pattern = '*.onnx' if subfolder == '' else f'{subfolder}/*.onnx' onnx_files = [p for p in repo_files if p.match(pattern)] if len(onnx_files) == 0: raise FileNotFoundError(f'Could not find any ONNX model file in {model_path}') elif len(onnx_files) > 1: raise RuntimeError(f'Too many ONNX model files were found in {model_path}, specify which one to load by using the file_name argument.') else: file_name = onnx_files[0].name if file_name not in regular_onnx_filenames: logger.warning(f'The ONNX file {file_name} is not a regular name used in optimum.onnxruntime, the ORTModel might not behave as expected.') (model_cache_path, preprocessors) = cls._cached_file(model_path=model_path, token=token, revision=revision, force_download=force_download, cache_dir=cache_dir, file_name=file_name, subfolder=subfolder, local_files_only=local_files_only) new_model_save_dir = model_cache_path.parent if model_save_dir is None: model_save_dir = new_model_save_dir model = ORTModel.load_model(model_cache_path, provider=provider, session_options=session_options, provider_options=provider_options) return cls(model=model, config=config, use_io_binding=use_io_binding, model_save_dir=model_save_dir, preprocessors=preprocessors) @classmethod def _from_transformers(cls, model_id: str, config: 'PretrainedConfig', use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, 
revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', local_files_only: bool=False, trust_remote_code: bool=False, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, task: Optional[str]=None) -> 'ORTModel': if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token return cls._export(model_id=model_id, config=config, revision=revision, cache_dir=cache_dir, force_download=force_download, token=token, subfolder=subfolder, local_files_only=local_files_only, trust_remote_code=trust_remote_code, provider=provider, session_options=session_options, provider_options=provider_options, use_io_binding=use_io_binding, task=task) @classmethod def _export(cls, model_id: str, config: 'PretrainedConfig', use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', local_files_only: bool=False, trust_remote_code: bool=False, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, task: Optional[str]=None) -> 'ORTModel': if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token if task is None: task = cls._auto_model_to_task(cls.auto_model_class) save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) main_export(model_name_or_path=model_id, output=save_dir_path, task=task, do_validation=False, no_post_process=True, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code) config.save_pretrained(save_dir_path) maybe_save_preprocessors(model_id, save_dir_path, src_subfolder=subfolder) return cls._from_pretrained(save_dir_path, config, use_io_binding=use_io_binding, model_save_dir=save_dir, provider=provider, session_options=session_options, provider_options=provider_options) @classmethod @add_start_docstrings(FROM_PRETRAINED_START_DOCSTRING) def from_pretrained(cls, model_id: Union[str, Path], export: bool=False, force_download: bool=False, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', config: Optional['PretrainedConfig']=None, local_files_only: bool=False, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, **kwargs): if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. 
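# Sketch of the export path taken by _export above: main_export writes the ONNX graph
# (plus config and preprocessors) into a temporary directory, which is then reloaded
# with _from_pretrained. The checkpoint id and task are example values.
from pathlib import Path
from tempfile import TemporaryDirectory

from optimum.exporters.onnx import main_export

save_dir = TemporaryDirectory()
main_export(
    model_name_or_path="distilbert-base-uncased",
    output=Path(save_dir.name),
    task="text-classification",
)
print(list(Path(save_dir.name).glob("*.onnx")))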
Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token return super().from_pretrained(model_id, export=export, force_download=force_download, token=token, cache_dir=cache_dir, subfolder=subfolder, config=config, local_files_only=local_files_only, provider=provider, session_options=session_options, provider_options=provider_options, use_io_binding=use_io_binding, **kwargs) def _prepare_output_buffer(self, model: ort.InferenceSession, output_shape: Tuple[int], output_name: str): ort_type = TypeHelper.get_output_type(model, output_name) torch_type = TypeHelper.ort_type_to_torch_type(ort_type) if len(output_shape) > 0: output_buffer = torch.empty(np.prod(output_shape), dtype=torch_type, device=self.device).contiguous() else: output_buffer = torch.tensor(0, dtype=torch_type, device=self.device).contiguous() return output_buffer def _output_shape_inference(self, axis_name: Union[str, int], dimensions: Dict[str, int]) -> Union[str, int]: if isinstance(axis_name, int): return axis_name elif axis_name in dimensions: return dimensions[axis_name] tokens = [] for (idx, match_) in enumerate(re.finditer(self.output_shape_inference_pattern, axis_name)): groups = match_.groups() matched_group = None for (idx, group) in enumerate(groups): if group is not None: matched_group = idx break if matched_group == 0: dim = dimensions.get(groups[0], None) if dim is None or not isinstance(dim, int): return axis_name tokens.append(str(dim)) else: tokens.append(groups[matched_group]) return int(eval(' '.join(tokens))) def _prepare_io_binding(self, model: ort.InferenceSession, *model_inputs: torch.Tensor, ordered_input_names: List[str], known_output_shapes: Optional[Dict[str, Tuple[int]]]=None, outputs_to_not_bind: Optional[Union[Set[str], str]]=None) -> Tuple[ort.IOBinding, Dict[str, Tuple[int]], Dict[str, torch.Tensor]]: io_binding = model.io_binding() name_to_np_type = TypeHelper.get_io_numpy_type_map(model) input_name_to_shape = {} for (idx, tensor) in enumerate(model_inputs): if tensor is None: continue name = ordered_input_names[idx] tensor = tensor.contiguous() input_name_to_shape[name] = tensor.shape data_ptr = tensor.data_ptr() if 'past' in name and data_ptr == 0: data_ptr = model_inputs[0].data_ptr() io_binding.bind_input(name, tensor.device.type, IOBindingHelper.get_device_index(self.device), name_to_np_type[name], tuple(tensor.shape), data_ptr) dimensions = {} for input_ in model.get_inputs(): shape = input_.shape for (idx, axis) in enumerate(shape): if isinstance(axis, str): dimensions[axis] = input_name_to_shape[input_.name][idx] output_shapes = {} output_buffers = {} if known_output_shapes is None: known_output_shapes = {} if outputs_to_not_bind is None: outputs_to_not_bind = set() elif isinstance(outputs_to_not_bind, str): outputs_to_not_bind = {outputs_to_not_bind} for output_node in model.get_outputs(): output_name = output_node.name if output_name in outputs_to_not_bind: continue if output_name in known_output_shapes: output_shape = known_output_shapes[output_name] else: output_shape = [] for axis_name in output_node.shape: output_shape.append(self._output_shape_inference(axis_name, dimensions)) output_buffer = self._prepare_output_buffer(model, output_shape, output_name) io_binding.bind_output(output_name, output_buffer.device.type, IOBindingHelper.get_device_index(self.device), name_to_np_type[output_name], output_shape, output_buffer.data_ptr()) 
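# Worked example of the symbolic-shape evaluation done by _output_shape_inference
# above, rewritten in a simplified standalone form: a dynamic axis name such as
# "past_sequence_length + 1" is tokenized with the same pattern and evaluated against
# the dimensions gathered from the bound inputs.
import re

pattern = re.compile(r"([a-zA-Z_]+)|([0-9]+)|([+-/*])|([\(\)])")
dimensions = {"batch_size": 2, "past_sequence_length": 16}

tokens = []
for match_ in re.finditer(pattern, "past_sequence_length + 1"):
    groups = match_.groups()
    idx = next(i for i, g in enumerate(groups) if g is not None)
    tokens.append(str(dimensions[groups[0]]) if idx == 0 else groups[idx])
print(int(eval(" ".join(tokens))))                        # 17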
output_shapes[output_name] = output_shape output_buffers[output_name] = output_buffer return (io_binding, output_shapes, output_buffers) def prepare_io_binding(self, *model_inputs, ordered_input_names, outputs_to_not_bind=None, known_output_shapes=None): return self._prepare_io_binding(self.model, *model_inputs, ordered_input_names=ordered_input_names, known_output_shapes=known_output_shapes, outputs_to_not_bind=outputs_to_not_bind) def raise_on_numpy_input_io_binding(self, use_torch: bool): if use_torch is False and self.use_io_binding is True: raise ValueError('IO Binding can not be used when passing numpy inputs. Please disable IO Binding with model.use_io_binding = False, or pass torch.Tensor inputs instead.') def _prepare_onnx_inputs(self, use_torch: bool, **inputs: Union[torch.Tensor, np.ndarray]) -> Dict[str, np.ndarray]: onnx_inputs = {} for input_name in self.input_names.keys(): onnx_inputs[input_name] = inputs.pop(input_name) if use_torch: onnx_inputs[input_name] = onnx_inputs[input_name].cpu().detach().numpy() if onnx_inputs[input_name].dtype != self.input_dtypes[input_name]: onnx_inputs[input_name] = onnx_inputs[input_name].astype(TypeHelper.ort_type_to_numpy_type(self.input_dtypes[input_name])) return onnx_inputs def _prepare_onnx_outputs(self, use_torch: bool, *onnx_outputs: np.ndarray) -> Dict[str, Union[torch.Tensor, np.ndarray]]: model_outputs = {} for (output_name, idx) in self.output_names.items(): model_outputs[output_name] = onnx_outputs[idx] if use_torch: model_outputs[output_name] = torch.from_numpy(model_outputs[output_name]).to(self.device) return model_outputs @staticmethod def _cached_file(model_path: Union[Path, str], use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, file_name: Optional[str]=None, subfolder: str='', local_files_only: bool=False): if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. 
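# Sketch of the dtype handling in _prepare_onnx_inputs above: torch inputs are moved
# to CPU, converted to numpy and cast to the dtype declared by the ONNX graph. The
# int32 target dtype is an assumed example.
import numpy as np
import torch

attention_mask = torch.ones(1, 8, dtype=torch.int64)
onnx_input = attention_mask.cpu().detach().numpy()
if onnx_input.dtype != np.int32:
    onnx_input = onnx_input.astype(np.int32)
print(onnx_input.dtype)                                   # int32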
Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token model_path = Path(model_path) if model_path.is_dir(): model_cache_path = model_path / file_name preprocessors = maybe_load_preprocessors(model_path.as_posix()) else: model_cache_path = hf_hub_download(repo_id=model_path.as_posix(), filename=file_name, subfolder=subfolder, token=token, revision=revision, cache_dir=cache_dir, force_download=force_download, local_files_only=local_files_only) try: hf_hub_download(repo_id=model_path.as_posix(), subfolder=subfolder, filename=file_name + '_data', token=token, revision=revision, cache_dir=cache_dir, force_download=force_download, local_files_only=local_files_only) except EntryNotFoundError: pass model_cache_path = Path(model_cache_path) preprocessors = maybe_load_preprocessors(model_path.as_posix(), subfolder=subfolder) return (model_cache_path, preprocessors) def can_generate(self) -> bool: return isinstance(self, GenerationMixin) FEATURE_EXTRACTION_EXAMPLE = '\n Example of feature extraction:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = tokenizer("My name is Philipp and I live in Germany.", return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> last_hidden_state = outputs.last_hidden_state\n >>> list(last_hidden_state.shape)\n [1, 12, 384]\n ```\n\n Example using `transformers.pipeline`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_extractor = pipeline("feature-extraction", model=model, tokenizer=tokenizer)\n\n >>> text = "My name is Philipp and I live in Germany."\n >>> pred = onnx_extractor(text)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForFeatureExtraction(ORTModel): auto_model_class = AutoModel @add_start_docstrings_to_model_forward(ONNX_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length') + FEATURE_EXTRACTION_EXAMPLE.format(processor_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForFeatureExtraction', checkpoint='optimum/all-MiniLM-L6-v2')) def forward(self, input_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, attention_mask: Optional[Union[torch.Tensor, np.ndarray]]=None, token_type_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): use_torch = isinstance(input_ids, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(input_ids, attention_mask, token_type_ids, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() last_hidden_state = output_buffers['last_hidden_state'].view(output_shapes['last_hidden_state']) else: model_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) if 
'last_hidden_state' in self.output_names: last_hidden_state = model_outputs['last_hidden_state'] else: last_hidden_state = next(iter(model_outputs.values())) return BaseModelOutput(last_hidden_state=last_hidden_state) @classmethod def _export(cls, model_id: str, config: 'PretrainedConfig', use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', local_files_only: bool=False, trust_remote_code: bool=False, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, task: Optional[str]=None) -> 'ORTModel': if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token if task is None: task = cls._auto_model_to_task(cls.auto_model_class) save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) main_export(model_name_or_path=model_id, output=save_dir_path, task=task, do_validation=False, no_post_process=True, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, library_name='transformers') config.save_pretrained(save_dir_path) maybe_save_preprocessors(model_id, save_dir_path, src_subfolder=subfolder) return cls._from_pretrained(save_dir_path, config, use_io_binding=use_io_binding, model_save_dir=save_dir, provider=provider, session_options=session_options, provider_options=provider_options) MASKED_LM_EXAMPLE = '\n Example of feature extraction:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n >>> list(logits.shape)\n [1, 8, 28996]\n ```\n\n Example using `transformers.pipeline`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> fill_masker = pipeline("fill-mask", model=model, tokenizer=tokenizer)\n\n >>> text = "The capital of France is [MASK]."\n >>> pred = fill_masker(text)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForMaskedLM(ORTModel): auto_model_class = AutoModelForMaskedLM @add_start_docstrings_to_model_forward(ONNX_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length') + MASKED_LM_EXAMPLE.format(processor_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForMaskedLM', checkpoint='optimum/bert-base-uncased-for-fill-mask')) def forward(self, input_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, attention_mask: Optional[Union[torch.Tensor, np.ndarray]]=None, token_type_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): use_torch = isinstance(input_ids, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 
'cuda' and self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(input_ids, attention_mask, token_type_ids, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() logits = output_buffers['logits'].view(output_shapes['logits']) else: model_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] return MaskedLMOutput(logits=logits) QUESTION_ANSWERING_EXAMPLE = '\n Example of question answering:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"\n >>> inputs = tokenizer(question, text, return_tensors="np")\n >>> start_positions = torch.tensor([1])\n >>> end_positions = torch.tensor([3])\n\n >>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)\n >>> start_scores = outputs.start_logits\n >>> end_scores = outputs.end_logits\n ```\n Example using `transformers.pipeline`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_qa = pipeline("question-answering", model=model, tokenizer=tokenizer)\n\n >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"\n >>> pred = onnx_qa(question, text)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForQuestionAnswering(ORTModel): auto_model_class = AutoModelForQuestionAnswering @add_start_docstrings_to_model_forward(ONNX_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length') + QUESTION_ANSWERING_EXAMPLE.format(processor_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForQuestionAnswering', checkpoint='optimum/roberta-base-squad2')) def forward(self, input_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, attention_mask: Optional[Union[torch.Tensor, np.ndarray]]=None, token_type_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): use_torch = isinstance(input_ids, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(input_ids, attention_mask, token_type_ids, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() start_logits = output_buffers['start_logits'].view(output_shapes['start_logits']) end_logits = output_buffers['end_logits'].view(output_shapes['end_logits']) else: model_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) start_logits = model_outputs['start_logits'] end_logits = model_outputs['end_logits'] return 
QuestionAnsweringModelOutput(start_logits=start_logits, end_logits=end_logits) SEQUENCE_CLASSIFICATION_EXAMPLE = '\n Example of single-label classification:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n >>> list(logits.shape)\n [1, 2]\n ```\n\n Example using `transformers.pipelines`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)\n\n >>> text = "Hello, my dog is cute"\n >>> pred = onnx_classifier(text)\n ```\n\n Example using zero-shot-classification `transformers.pipelines`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("optimum/distilbert-base-uncased-mnli")\n >>> model = {model_class}.from_pretrained("optimum/distilbert-base-uncased-mnli")\n >>> onnx_z0 = pipeline("zero-shot-classification", model=model, tokenizer=tokenizer)\n\n >>> sequence_to_classify = "Who are you voting for in 2020?"\n >>> candidate_labels = ["Europe", "public health", "politics", "elections"]\n >>> pred = onnx_z0(sequence_to_classify, candidate_labels, multi_label=True)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForSequenceClassification(ORTModel): auto_model_class = AutoModelForSequenceClassification @add_start_docstrings_to_model_forward(ONNX_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length') + SEQUENCE_CLASSIFICATION_EXAMPLE.format(processor_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForSequenceClassification', checkpoint='optimum/distilbert-base-uncased-finetuned-sst-2-english')) def forward(self, input_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, attention_mask: Optional[Union[torch.Tensor, np.ndarray]]=None, token_type_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): use_torch = isinstance(input_ids, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(input_ids, attention_mask, token_type_ids, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() logits = output_buffers['logits'].view(output_shapes['logits']) else: model_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] return SequenceClassifierOutput(logits=logits) TOKEN_CLASSIFICATION_EXAMPLE = '\n Example of token classification:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> import torch\n\n >>> tokenizer = 
{processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = tokenizer("My name is Philipp and I live in Germany.", return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n >>> list(logits.shape)\n [1, 12, 9]\n ```\n\n Example using `transformers.pipelines`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_ner = pipeline("token-classification", model=model, tokenizer=tokenizer)\n\n >>> text = "My name is Philipp and I live in Germany."\n >>> pred = onnx_ner(text)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForTokenClassification(ORTModel): auto_model_class = AutoModelForTokenClassification @add_start_docstrings_to_model_forward(ONNX_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length') + TOKEN_CLASSIFICATION_EXAMPLE.format(processor_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForTokenClassification', checkpoint='optimum/bert-base-NER')) def forward(self, input_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, attention_mask: Optional[Union[torch.Tensor, np.ndarray]]=None, token_type_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): use_torch = isinstance(input_ids, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(input_ids, attention_mask, token_type_ids, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() logits = output_buffers['logits'].view(output_shapes['logits']) else: model_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] return TokenClassifierOutput(logits=logits) MULTIPLE_CHOICE_EXAMPLE = '\n Example of multiple choice:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}", export=True)\n\n >>> num_choices = 4\n >>> first_sentence = ["Members of the procession walk down the street holding small horn brass instruments."] * num_choices\n >>> second_sentence = [\n ... "A drum line passes by walking down the street playing their instruments.",\n ... "A drum line has heard approaching them.",\n ... "A drum line arrives and they\'re outside dancing and asleep.",\n ... "A drum line turns the lead singer watches the performance."\n ... ]\n >>> inputs = tokenizer(first_sentence, second_sentence, truncation=True, padding=True)\n\n # Unflatten the input values, expanding them to the shape [batch_size, num_choices, seq_length]\n >>> for k, v in inputs.items():\n ... 
inputs[k] = [v[i: i + num_choices] for i in range(0, len(v), num_choices)]\n >>> inputs = dict(inputs.convert_to_tensors(tensor_type="pt"))\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForMultipleChoice(ORTModel): auto_model_class = AutoModelForMultipleChoice @add_start_docstrings_to_model_forward(ONNX_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length') + MULTIPLE_CHOICE_EXAMPLE.format(processor_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForMultipleChoice', checkpoint='ehdwns1516/bert-base-uncased_SWAG')) def forward(self, input_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, attention_mask: Optional[Union[torch.Tensor, np.ndarray]]=None, token_type_ids: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): use_torch = isinstance(input_ids, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(input_ids, attention_mask, token_type_ids, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() logits = output_buffers['logits'].view(output_shapes['logits']) else: model_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] return MultipleChoiceModelOutput(logits=logits) IMAGE_CLASSIFICATION_EXAMPLE = '\n Example of image classification:\n\n ```python\n >>> import requests\n >>> from PIL import Image\n >>> from optimum.onnxruntime import {model_class}\n >>> from transformers import {processor_class}\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> preprocessor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = preprocessor(images=image, return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n\n Example using `transformers.pipeline`:\n\n ```python\n >>> import requests\n >>> from PIL import Image\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> preprocessor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_image_classifier = pipeline("image-classification", model=model, feature_extractor=preprocessor)\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> pred = onnx_image_classifier(url)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForImageClassification(ORTModel): auto_model_class = AutoModelForImageClassification @add_start_docstrings_to_model_forward(ONNX_IMAGE_INPUTS_DOCSTRING.format('batch_size, num_channels, height, width') + IMAGE_CLASSIFICATION_EXAMPLE.format(processor_class=_FEATURE_EXTRACTOR_FOR_DOC, model_class='ORTModelForImageClassification', checkpoint='optimum/vit-base-patch16-224')) def forward(self, pixel_values: Union[torch.Tensor, np.ndarray], **kwargs): use_torch = isinstance(pixel_values, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and 
self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(pixel_values, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() logits = output_buffers['logits'].view(output_shapes['logits']) else: model_inputs = {'pixel_values': pixel_values} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] return ImageClassifierOutput(logits=logits) SEMANTIC_SEGMENTATION_EXAMPLE = '\n Example of semantic segmentation:\n\n ```python\n >>> import requests\n >>> from PIL import Image\n >>> from optimum.onnxruntime import {model_class}\n >>> from transformers import {processor_class}\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> preprocessor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = preprocessor(images=image, return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n\n Example using `transformers.pipeline`:\n\n ```python\n >>> import requests\n >>> from PIL import Image\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> preprocessor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_image_segmenter = pipeline("image-segmentation", model=model, feature_extractor=preprocessor)\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> pred = onnx_image_segmenter(url)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForSemanticSegmentation(ORTModel): auto_model_class = AutoModelForSemanticSegmentation @add_start_docstrings_to_model_forward(ONNX_IMAGE_INPUTS_DOCSTRING.format('batch_size, num_channels, height, width') + SEMANTIC_SEGMENTATION_EXAMPLE.format(processor_class=_FEATURE_EXTRACTOR_FOR_DOC, model_class='ORTModelForSemanticSegmentation', checkpoint='optimum/segformer-b0-finetuned-ade-512-512')) def forward(self, pixel_values: Union[torch.Tensor, np.ndarray], **kwargs): use_torch = isinstance(pixel_values, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(pixel_values, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() logits = output_buffers['logits'].view(output_shapes['logits']) else: model_inputs = {'pixel_values': pixel_values} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] return SemanticSegmenterOutput(logits=logits) AUDIO_CLASSIFICATION_EXAMPLE = '\n Example of audio classification:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")\n >>> dataset = dataset.sort("id")\n >>> sampling_rate = 
dataset.features["audio"].sampling_rate\n\n >>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> # audio file is decoded on the fly\n >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")\n\n >>> with torch.no_grad():\n ... logits = model(**inputs).logits\n\n >>> predicted_class_ids = torch.argmax(logits, dim=-1).item()\n >>> predicted_label = model.config.id2label[predicted_class_ids]\n ```\n Example using `transformers.pipeline`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")\n >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")\n >>> dataset = dataset.sort("id")\n\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_ac = pipeline("audio-classification", model=model, feature_extractor=feature_extractor)\n\n >>> pred = onnx_ac(dataset[0]["audio"]["array"])\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForAudioClassification(ORTModel): auto_model_class = AutoModelForAudioClassification def __init__(self, model: ort.InferenceSession, config: 'PretrainedConfig', use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, preprocessors: Optional[List]=None, **kwargs): super().__init__(model=model, config=config, use_io_binding=use_io_binding, model_save_dir=model_save_dir, preprocessors=preprocessors, **kwargs) if config.model_type == 'whisper': self.input_name = 'input_features' else: self.input_name = 'input_values' @add_start_docstrings_to_model_forward(ONNX_AUDIO_INPUTS_DOCSTRING.format('batch_size, sequence_length') + AUDIO_CLASSIFICATION_EXAMPLE.format(processor_class=_FEATURE_EXTRACTOR_FOR_DOC, model_class='ORTModelForAudioClassification', checkpoint='optimum/hubert-base-superb-ks')) def forward(self, input_values: Optional[Union[torch.Tensor, np.ndarray]]=None, attention_mask: Optional[Union[torch.Tensor, np.ndarray]]=None, input_features: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): if self.input_name == 'input_features': assert input_features is not None, 'input_features must be provided for this model' model_input = input_features elif self.input_name == 'input_values': assert input_values is not None, 'input_values must be provided for this model' model_input = input_values else: raise ValueError(f'Input {self.input_name} not supported for Audio Classification') use_torch = isinstance(model_input, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(model_input, attention_mask, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() logits = output_buffers['logits'].view(output_shapes['logits']) else: model_inputs = {self.input_name: model_input, 'attention_mask': attention_mask} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] return SequenceClassifierOutput(logits=logits) CTC_EXAMPLE = '\n Example of CTC:\n\n ```python\n >>> from transformers 
import {processor_class}, HubertForCTC\n >>> from optimum.onnxruntime import {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")\n >>> dataset = dataset.sort("id")\n >>> sampling_rate = dataset.features["audio"].sampling_rate\n\n >>> processor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> # audio file is decoded on the fly\n >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")\n >>> with torch.no_grad():\n ... logits = model(**inputs).logits\n >>> predicted_ids = torch.argmax(logits, dim=-1)\n\n >>> transcription = processor.batch_decode(predicted_ids)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForCTC(ORTModel): auto_model_class = AutoModelForCTC @add_start_docstrings_to_model_forward(ONNX_AUDIO_INPUTS_DOCSTRING.format('batch_size, sequence_length') + CTC_EXAMPLE.format(processor_class=_PROCESSOR_FOR_DOC, model_class='ORTModelForCTC', checkpoint='optimum/hubert-large-ls960-ft')) def forward(self, input_values: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): use_torch = isinstance(input_values, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: input_size = input_values.shape[1] output_sizes = [] def _conv_output_size(input_size, kernel_size, stride): return (input_size - kernel_size) // stride + 1 for (kernel_size, stride) in zip(self.config.conv_kernel, self.config.conv_stride): input_size = _conv_output_size(input_size, kernel_size, stride) output_sizes.append(input_size) known_output_shapes = {'logits': [input_values.shape[0], output_sizes[-1], self.config.vocab_size]} (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(input_values, ordered_input_names=self._ordered_input_names, known_output_shapes=known_output_shapes) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() logits = output_buffers['logits'].view(output_shapes['logits']) else: model_inputs = {'input_values': input_values} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] return CausalLMOutput(logits=logits)
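# Editor's note: the CUDA IO-binding branch of ORTModelForCTC.forward above pre-computes the logits
# sequence length by folding the feature extractor's convolution stack. The standalone sketch below
# reproduces that arithmetic; the conv_kernel/conv_stride values are illustrative Wav2Vec2-style
# defaults, not read from any particular checkpoint, and the helper names are hypothetical.
def _example_conv_output_size(input_size: int, kernel_size: int, stride: int) -> int:
    # Same formula as the nested helper in ORTModelForCTC.forward.
    return (input_size - kernel_size) // stride + 1

def _example_ctc_logits_length(num_samples: int, conv_kernel, conv_stride) -> int:
    size = num_samples
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        size = _example_conv_output_size(size, kernel_size, stride)
    return size

# One second of 16 kHz audio through a Wav2Vec2-like feature encoder yields 49 logit frames,
# so known_output_shapes would be {'logits': [batch_size, 49, vocab_size]}.
assert _example_ctc_logits_length(16000, [10, 3, 3, 3, 3, 2, 2], [5, 2, 2, 2, 2, 2, 2]) == 49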
AUDIO_XVECTOR_EXAMPLE = '\n Example of Audio XVector:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")\n >>> dataset = dataset.sort("id")\n >>> sampling_rate = dataset.features["audio"].sampling_rate\n\n >>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> # audio file is decoded on the fly\n >>> inputs = feature_extractor(\n ... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True\n ... )\n >>> with torch.no_grad():\n ... embeddings = model(**inputs).embeddings\n\n >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu()\n\n >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1)\n >>> similarity = cosine_sim(embeddings[0], embeddings[1])\n >>> threshold = 0.7\n >>> if similarity < threshold:\n ... print("Speakers are not the same!")\n >>> round(similarity.item(), 2)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForAudioXVector(ORTModel): auto_model_class = AutoModelForAudioXVector @add_start_docstrings_to_model_forward(ONNX_AUDIO_INPUTS_DOCSTRING.format('batch_size, sequence_length') + AUDIO_XVECTOR_EXAMPLE.format(processor_class=_FEATURE_EXTRACTOR_FOR_DOC, model_class='ORTModelForAudioXVector', checkpoint='optimum/wav2vec2-base-superb-sv')) def forward(self, input_values: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): use_torch = isinstance(input_values, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: (io_binding, output_shapes, output_buffers) = self.prepare_io_binding(input_values, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() logits = output_buffers['logits'].view(output_shapes['logits']) embeddings = output_buffers['embeddings'].view(output_shapes['embeddings']) else: model_inputs = {'input_values': input_values} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] embeddings = model_outputs['embeddings'] return XVectorOutput(logits=logits, embeddings=embeddings) AUDIO_FRAME_CLASSIFICATION_EXAMPLE = '\n Example of audio frame classification:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")\n >>> dataset = dataset.sort("id")\n >>> sampling_rate = dataset.features["audio"].sampling_rate\n\n >>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate)\n >>> with torch.no_grad():\n ... 
logits = model(**inputs).logits\n\n >>> probabilities = torch.sigmoid(logits[0])\n >>> labels = (probabilities > 0.5).long()\n >>> labels[0].tolist()\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForAudioFrameClassification(ORTModel): auto_model_class = AutoModelForAudioFrameClassification @add_start_docstrings_to_model_forward(ONNX_AUDIO_INPUTS_DOCSTRING.format('batch_size, sequence_length') + AUDIO_FRAME_CLASSIFICATION_EXAMPLE.format(processor_class=_FEATURE_EXTRACTOR_FOR_DOC, model_class='ORTModelForAudioFrameClassification', checkpoint='optimum/wav2vec2-base-superb-sd')) def forward(self, input_values: Optional[Union[torch.Tensor, np.ndarray]]=None, **kwargs): use_torch = isinstance(input_values, torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: raise NotImplementedError() else: model_inputs = {'input_values': input_values} onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) logits = model_outputs['logits'] return TokenClassifierOutput(logits=logits) CUSTOM_TASKS_EXAMPLE = '\n Example of custom tasks (e.g. a sentence transformer taking `pooler_output` as output):\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = tokenizer("I love burritos!", return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> last_hidden_state = outputs.last_hidden_state\n >>> pooler_output = outputs.pooler_output\n ```\n\n Example using `transformers.pipelines` (only if the task is supported):\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_extractor = pipeline("feature-extraction", model=model, tokenizer=tokenizer)\n\n >>> text = "I love burritos!"\n >>> pred = onnx_extractor(text)\n ```\n' @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForCustomTasks(ORTModel): @add_start_docstrings_to_model_forward(CUSTOM_TASKS_EXAMPLE.format(processor_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForCustomTasks', checkpoint='optimum/sbert-all-MiniLM-L6-with-pooler')) def forward(self, **model_inputs: Union[torch.Tensor, np.ndarray]): use_torch = isinstance(next(iter(model_inputs.values())), torch.Tensor) self.raise_on_numpy_input_io_binding(use_torch) if self.device.type == 'cuda' and self.use_io_binding: io_binding = IOBindingHelper.prepare_io_binding(self, **model_inputs) io_binding.synchronize_inputs() self.model.run_with_iobinding(io_binding) io_binding.synchronize_outputs() model_outputs = {} for (name, output) in zip(self.output_names.keys(), io_binding._iobinding.get_outputs()): model_outputs[name] = IOBindingHelper.to_pytorch(output) else: onnx_inputs = self._prepare_onnx_inputs(use_torch, **model_inputs) onnx_outputs = self.model.run(None, onnx_inputs) model_outputs = self._prepare_onnx_outputs(use_torch, *onnx_outputs) return ModelOutput(**model_outputs)
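# Editor's note: a minimal usage sketch (not part of the original module) showing how the CUDA
# IO-binding branch shared by the forward() methods above is reached. It assumes a CUDA machine with
# onnxruntime-gpu installed; the checkpoint is the one already used in the docstring examples above,
# and the behavior of use_io_binding defaulting to True with the CUDA provider is an assumption
# based on the provider logic shown elsewhere in this codebase.
from transformers import AutoTokenizer
from optimum.onnxruntime import ORTModelForSequenceClassification

ort_model = ORTModelForSequenceClassification.from_pretrained(
    "optimum/distilbert-base-uncased-finetuned-sst-2-english",
    provider="CUDAExecutionProvider",  # with this provider, IO binding is typically enabled by default
)
ort_model.to("cuda")

tokenizer = AutoTokenizer.from_pretrained("optimum/distilbert-base-uncased-finetuned-sst-2-english")
# With IO binding enabled, inputs must be torch tensors: numpy inputs trigger
# raise_on_numpy_input_io_binding() in the forward methods above.
inputs = tokenizer("I love burritos!", return_tensors="pt").to("cuda")
logits = ort_model(**inputs).logits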
# File: optimum-main/optimum/onnxruntime/modeling_seq2seq.py """""" import logging import shutil import warnings from abc import ABC, abstractmethod from pathlib import Path from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import numpy as np import torch from huggingface_hub import hf_hub_download from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from transformers import AutoModelForSeq2SeqLM, AutoModelForSpeechSeq2Seq, AutoModelForVision2Seq, GenerationConfig, Pix2StructForConditionalGeneration, WhisperForConditionalGeneration from transformers.file_utils import add_end_docstrings, add_start_docstrings_to_model_forward from transformers.modeling_outputs import BaseModelOutput, Seq2SeqLMOutput from transformers.models.auto.modeling_auto import MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES import onnxruntime as ort from ..exporters.onnx import main_export from ..onnx.utils import _get_external_data_paths from ..utils import check_if_transformers_greater from ..utils.file_utils import validate_file_exists from ..utils.normalized_config import NormalizedConfigManager from ..utils.save_utils import maybe_load_preprocessors, maybe_save_preprocessors from .base import ORTDecoderForSeq2Seq, ORTEncoder from .constants import DECODER_MERGED_ONNX_FILE_PATTERN, DECODER_ONNX_FILE_PATTERN, DECODER_WITH_PAST_ONNX_FILE_PATTERN, ENCODER_ONNX_FILE_PATTERN from .modeling_ort import ONNX_MODEL_END_DOCSTRING, ORTModel from .utils import ONNX_DECODER_NAME, ONNX_DECODER_WITH_PAST_NAME, ONNX_ENCODER_NAME, get_provider_for_device, parse_device, validate_provider_availability if check_if_transformers_greater('4.25.0'): from transformers.generation import GenerationMixin else: from transformers.generation_utils import GenerationMixin if check_if_transformers_greater('4.43.0'): from transformers.cache_utils import EncoderDecoderCache else: EncoderDecoderCache = dict from huggingface_hub.utils import EntryNotFoundError if TYPE_CHECKING: from transformers import PretrainedConfig logger = logging.getLogger(__name__) SEQ2SEQ_ENCODER_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor`):\n Indices of input sequence tokens in the vocabulary of shape `(batch_size, encoder_sequence_length)`.\n attention_mask (`torch.LongTensor`):\n Mask to avoid performing attention on padding token indices, of shape\n `(batch_size, encoder_sequence_length)`. Mask values selected in `[0, 1]`.\n' SPEECH_ENCODER_INPUTS_DOCSTRING = '\n Args:\n input_features (`torch.FloatTensor`):\n Mel / fbank features extracted from the raw speech waveform. `(batch_size, feature_size, encoder_sequence_length)`.\n' VISION_ENCODER_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor`):\n Features extracted from an Image. 
This tensor should be of shape `(batch_size, num_channels, height, width)`.\n' PIX2STRUCT_INPUTS_DOCSTRING = '\n Args:\n flattened_patches (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_channels x patch_height x patch_width)`):\n Flattened and padded pixel values.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Mask to avoid performing attention on padding pixel values.\n' DECODER_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor`):\n Indices of decoder input sequence tokens in the vocabulary of shape `(batch_size, decoder_sequence_length)`.\n encoder_hidden_states (`torch.FloatTensor`):\n The encoder `last_hidden_state` of shape `(batch_size, encoder_sequence_length, hidden_size)`.\n encoder_attention_mask (`torch.LongTensor`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder `input_ids`.\n past_key_values (`tuple(tuple(torch.FloatTensor), *optional*, defaults to `None`)`\n Contains the precomputed key and value hidden states of the attention blocks used to speed up decoding.\n The tuple is of length `config.n_layers` with each tuple having 2 tensors of shape\n `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)` and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n' SEQ2SEQ_ONNX_MODEL_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor`):\n Indices of input sequence tokens in the vocabulary of shape `(batch_size, encoder_sequence_length)`.\n attention_mask (`torch.LongTensor`):\n Mask to avoid performing attention on padding token indices, of shape\n `(batch_size, encoder_sequence_length)`. Mask values selected in `[0, 1]`.\n decoder_input_ids (`torch.LongTensor`):\n Indices of decoder input sequence tokens in the vocabulary of shape `(batch_size, decoder_sequence_length)`.\n encoder_outputs (`torch.FloatTensor`):\n The encoder `last_hidden_state` of shape `(batch_size, encoder_sequence_length, hidden_size)`.\n past_key_values (`tuple(tuple(torch.FloatTensor), *optional*, defaults to `None`)`\n Contains the precomputed key and value hidden states of the attention blocks used to speed up decoding.\n The tuple is of length `config.n_layers` with each tuple having 2 tensors of shape\n `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)` and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n' SPEECH_SEQ2SEQ_ONNX_MODEL_DOCSTRING = '\n Args:\n input_features (`torch.FloatTensor`):\n Mel features extracted from the raw speech waveform.\n `(batch_size, feature_size, encoder_sequence_length)`.\n decoder_input_ids (`torch.LongTensor`):\n Indices of decoder input sequence tokens in the vocabulary of shape `(batch_size, decoder_sequence_length)`.\n encoder_outputs (`torch.FloatTensor`):\n The encoder `last_hidden_state` of shape `(batch_size, encoder_sequence_length, hidden_size)`.\n past_key_values (`tuple(tuple(torch.FloatTensor), *optional*, defaults to `None`)`\n Contains the precomputed key and value hidden states of the attention blocks used to speed up decoding.\n The tuple is of length `config.n_layers` with each tuple having 2 tensors of shape\n `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)` and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n' VISION_ENCODER_DECODER_SEQ2SEQ_ONNX_MODEL_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor`):\n Features 
extracted from an Image. This tensor should be of shape\n `(batch_size, num_channels, height, width)`.\n decoder_input_ids (`torch.LongTensor`):\n Indices of decoder input sequence tokens in the vocabulary of shape `(batch_size, decoder_sequence_length)`.\n encoder_outputs (`torch.FloatTensor`):\n The encoder `last_hidden_state` of shape `(batch_size, encoder_sequence_length, hidden_size)`.\n past_key_values (`tuple(tuple(torch.FloatTensor), *optional*, defaults to `None`)`\n Contains the precomputed key and value hidden states of the attention blocks used to speed up decoding.\n The tuple is of length `config.n_layers` with each tuple having 2 tensors of shape\n `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)` and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n' PIX2STRUCT_ONNX_MODEL_DOCSTRING = '\n Args:\n flattened_patches (`torch.FloatTensor` of shape `(batch_size, seq_length, hidden_size)`):\n Flattened pixel patches. the `hidden_size` is obtained by the following formula: `hidden_size` =\n `num_channels` * `patch_size` * `patch_size`\n The process of flattening the pixel patches is done by `Pix2StructProcessor`.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices.\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If\n `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at\n the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor), *optional*, defaults to `None`)`\n Contains the precomputed key and value hidden states of the attention blocks used to speed up decoding.\n The tuple is of length `config.n_layers` with each tuple having 2 tensors of shape\n `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)` and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n' _TOKENIZER_FOR_DOC = 'AutoTokenizer' _PROCESSOR_FOR_DOC = 'AutoProcessor' _IMAGE_PROCESSER_FOR_DOC = 'AutoImageProcessor' TRANSLATION_EXAMPLE = '\n Example of text generation:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> inputs = tokenizer("My name is Eustache and I like to", return_tensors="pt")\n\n >>> gen_tokens = model.generate(**inputs)\n >>> outputs = tokenizer.batch_decode(gen_tokens)\n ```\n\n Example using `transformers.pipeline`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> onnx_translation = pipeline("translation_en_to_de", model=model, tokenizer=tokenizer)\n\n >>> text = "My name is Eustache."\n >>> pred = onnx_translation(text)\n ```\n' AUTOMATIC_SPEECH_RECOGNITION_EXAMPLE = '\n Example of text generation:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> from datasets import load_dataset\n\n >>> processor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n\n >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")\n >>> inputs = processor.feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")\n\n >>> gen_tokens = model.generate(inputs=inputs.input_features)\n >>> outputs = processor.tokenizer.batch_decode(gen_tokens)\n ```\n\n Example using `transformers.pipeline`:\n\n ```python\n >>> from transformers import {processor_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n >>> from datasets import load_dataset\n\n >>> processor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}")\n >>> speech_recognition = pipeline("automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor)\n\n >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")\n >>> pred = speech_recognition(ds[0]["audio"]["array"])\n ```\n' IMAGE_TO_TEXT_EXAMPLE = '\n Example of text generation:\n\n ```python\n >>> from transformers import {processor_class}, {tokenizer_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> from PIL import Image\n >>> import requests\n\n\n >>> processor = {processor_class}.from_pretrained("{checkpoint}")\n >>> tokenizer = {tokenizer_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}", export=True)\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n >>> inputs = processor(image, 
return_tensors="pt")\n\n >>> gen_tokens = model.generate(**inputs)\n >>> outputs = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)\n\n ```\n\n Example using `transformers.pipeline`:\n\n ```python\n >>> from transformers import {processor_class}, {tokenizer_class}, pipeline\n >>> from optimum.onnxruntime import {model_class}\n >>> from PIL import Image\n >>> import requests\n\n\n >>> processor = {processor_class}.from_pretrained("{checkpoint}")\n >>> tokenizer = {tokenizer_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}", export=True)\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> image_to_text = pipeline("image-to-text", model=model, tokenizer=tokenizer, feature_extractor=processor, image_processor=processor)\n >>> pred = image_to_text(image)\n ```\n' PIX2STRUCT_EXAMPLE = '\n Example of pix2struct:\n\n ```python\n >>> from transformers import {processor_class}\n >>> from optimum.onnxruntime import {model_class}\n >>> from PIL import Image\n >>> import requests\n\n >>> processor = {processor_class}.from_pretrained("{checkpoint}")\n >>> model = {model_class}.from_pretrained("{checkpoint}", export=True, use_io_binding=True)\n\n >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n >>> question = "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud"\n >>> inputs = processor(images=image, text=question, return_tensors="pt")\n\n >>> gen_tokens = model.generate(**inputs)\n >>> outputs = processor.batch_decode(gen_tokens, skip_special_tokens=True)\n ```\n' class ORTEncoderForSpeech(ORTEncoder): @add_start_docstrings_to_model_forward(SPEECH_ENCODER_INPUTS_DOCSTRING) def forward(self, input_features: torch.FloatTensor, attention_mask: torch.LongTensor, **kwargs) -> BaseModelOutput: use_torch = isinstance(input_features, torch.Tensor) self.parent_model.raise_on_numpy_input_io_binding(use_torch) if self.parent_model.device.type == 'cuda' and self.parent_model.use_io_binding: model_inputs = [input_features, attention_mask] if 'attention_mask' in self.input_names else [input_features] (io_binding, output_shapes, output_buffers) = self.parent_model._prepare_io_binding(self.session, *model_inputs, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.session.run_with_iobinding(io_binding) io_binding.synchronize_outputs() last_hidden_state = output_buffers['last_hidden_state'].view(output_shapes['last_hidden_state']) else: if use_torch: onnx_inputs = {'input_features': input_features.cpu().detach().numpy()} if 'attention_mask' in self.input_names: onnx_inputs['attention_mask'] = attention_mask.cpu().detach().numpy() else: onnx_inputs = {'input_features': input_features} if 'attention_mask' in self.input_names: onnx_inputs['attention_mask'] = attention_mask if 'attention_mask' in self.input_names: if self.session.get_inputs()[1].type == 'tensor(int64)': onnx_inputs['attention_mask'] = onnx_inputs['attention_mask'].astype(np.int64) outputs = self.session.run(None, onnx_inputs) last_hidden_state = outputs[self.output_names['last_hidden_state']] if use_torch: last_hidden_state = torch.from_numpy(last_hidden_state).to(self.device) return BaseModelOutput(last_hidden_state=last_hidden_state) class ORTEncoderForVisionEncoderDecoder(ORTEncoder): 
@add_start_docstrings_to_model_forward(VISION_ENCODER_INPUTS_DOCSTRING) def forward(self, pixel_values: torch.FloatTensor, **kwargs) -> BaseModelOutput: use_torch = isinstance(pixel_values, torch.Tensor) self.parent_model.raise_on_numpy_input_io_binding(use_torch) if self.parent_model.device.type == 'cuda' and self.parent_model.use_io_binding: known_output_shapes = self.compute_encoder_known_output_shapes(pixel_values) (io_binding, output_shapes, output_buffers) = self.parent_model._prepare_io_binding(self.session, pixel_values, known_output_shapes=known_output_shapes, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.session.run_with_iobinding(io_binding) io_binding.synchronize_outputs() last_hidden_state = output_buffers['last_hidden_state'].view(output_shapes['last_hidden_state']) else: if use_torch: onnx_inputs = {'pixel_values': pixel_values.cpu().detach().numpy()} else: onnx_inputs = {'pixel_values': pixel_values} outputs = self.session.run(None, onnx_inputs) last_hidden_state = outputs[self.output_names['last_hidden_state']] if use_torch: last_hidden_state = torch.from_numpy(last_hidden_state).to(self.device) return BaseModelOutput(last_hidden_state=last_hidden_state) def compute_encoder_known_output_shapes(self, pixel_values: torch.FloatTensor) -> Dict[str, List[int]]: if self.normalized_config.config.model_type == 'donut-swin': encoder_sequence_length = self.normalized_config.config.image_size[0] * self.normalized_config.config.image_size[1] // self.normalized_config.config.hidden_size elif self.normalized_config.config.model_type in ['vit', 'deit']: return None else: raise ValueError(f'Unsupported encoder model type {self.normalized_config.config.model_type} for ORTForVisionSeq2Seq with IOBinding. Currently supported models are vit, donut-swin and deit. Please submit a PR to add support for this model type.') return {'last_hidden_state': [pixel_values.shape[0], encoder_sequence_length, self.normalized_config.config.hidden_size]} class ORTEncoderForPix2Struct(ORTEncoder): @add_start_docstrings_to_model_forward(PIX2STRUCT_INPUTS_DOCSTRING) def forward(self, flattened_patches: torch.FloatTensor, attention_mask: torch.LongTensor, **kwargs) -> BaseModelOutput: use_torch = isinstance(flattened_patches, torch.Tensor) self.parent_model.raise_on_numpy_input_io_binding(use_torch) if self.parent_model.device.type == 'cuda' and self.parent_model.use_io_binding: model_inputs = [flattened_patches, attention_mask] if 'attention_mask' in self.input_names else [flattened_patches] (io_binding, output_shapes, output_buffers) = self.parent_model._prepare_io_binding(self.session, *model_inputs, ordered_input_names=self._ordered_input_names) io_binding.synchronize_inputs() self.session.run_with_iobinding(io_binding) io_binding.synchronize_outputs() last_hidden_state = output_buffers['last_hidden_state'].view(output_shapes['last_hidden_state']) else: if use_torch: onnx_inputs = {'flattened_patches': flattened_patches.cpu().detach().numpy()} if 'attention_mask' in self.input_names: onnx_inputs['attention_mask'] = attention_mask.cpu().detach().numpy() else: onnx_inputs = {'flattened_patches': flattened_patches} if 'attention_mask' in self.input_names: onnx_inputs['attention_mask'] = attention_mask if 'attention_mask' in self.input_names: if self.session.get_inputs()[1].type == 'tensor(int64)': onnx_inputs['attention_mask'] = onnx_inputs['attention_mask'].astype(np.int64) outputs = self.session.run(None, onnx_inputs) last_hidden_state = outputs[self.output_names['last_hidden_state']] if use_torch: last_hidden_state = torch.from_numpy(last_hidden_state).to(self.device) return BaseModelOutput(last_hidden_state=last_hidden_state)
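# Editor's note: a small standalone sketch (not part of the original module) of the shape arithmetic in
# ORTEncoderForVisionEncoderDecoder.compute_encoder_known_output_shapes above. The numbers are
# illustrative Donut-style values (image_size=[2560, 1920], hidden_size=1024), not read from a checkpoint,
# and the helper name is hypothetical.
def _example_donut_swin_output_shape(batch_size, image_size, hidden_size):
    # encoder_sequence_length = H * W // hidden_size, exactly as in the method above
    encoder_sequence_length = image_size[0] * image_size[1] // hidden_size
    return {"last_hidden_state": [batch_size, encoder_sequence_length, hidden_size]}

# A 2560x1920 input with hidden_size=1024 gives a 4800-token encoder sequence, which is the shape
# pre-allocated for the IO-binding output buffer.
assert _example_donut_swin_output_shape(1, (2560, 1920), 1024) == {"last_hidden_state": [1, 4800, 1024]}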
class ORTModelForConditionalGeneration(ORTModel, ABC): base_model_prefix = 'onnx_model' _supports_cache_class = False def __init__(self, encoder_session: ort.InferenceSession, decoder_session: ort.InferenceSession, config: 'PretrainedConfig', onnx_paths: List[str], decoder_with_past_session: Optional[ort.InferenceSession]=None, use_cache: bool=True, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, preprocessors: Optional[List]=None, generation_config: Optional[GenerationConfig]=None, **kwargs): def show_deprecated_argument(arg_name): if kwargs.pop(arg_name, None) is not None: logger.warning(f'The {arg_name} argument to create an {self.__class__.__name__} is deprecated, and not used anymore.') show_deprecated_argument('last_encoder_model_name') show_deprecated_argument('last_decoder_model_name') show_deprecated_argument('last_decoder_with_past_model_name') if kwargs: raise ValueError(f"{self.__class__.__name__} received {', '.join(kwargs.keys())}, but does not accept those arguments.") ABC.__init__(self) if use_io_binding is None: if decoder_session.get_providers()[0] == 'CUDAExecutionProvider': use_io_binding = True else: use_io_binding = False self.shared_attributes_init(encoder_session, use_io_binding=use_io_binding, model_save_dir=model_save_dir, preprocessors=preprocessors) self.config = config self.name_or_path = config.name_or_path self.onnx_paths = onnx_paths self.use_cache = use_cache if use_cache is True: use_merged = 'use_cache_branch' in [inp.name for inp in decoder_session.get_inputs()] if use_merged is True and decoder_with_past_session is not None: raise ValueError('Detected a merged decoder, but decoder_with_past_session was provided. Please only set decoder_session, or provide a non-merged decoder_session.') if use_cache is True and use_merged is False and (decoder_with_past_session is None): raise ValueError('The parameter use_cache was set as True, but neither decoder_with_past_session was passed nor a use_cache branch can be found in the decoder_session. Please pass a decoder_with_past_session or set use_cache=False.') else: use_merged = False if decoder_with_past_session is not None: raise ValueError('The parameter decoder_with_past_session was passed, although use_cache is False. Please pass use_cache=True for decoder_with_past_session to be used.') if use_cache is False and use_io_binding is True: raise ValueError('When using CUDAExecutionProvider, the parameters combination use_cache=False, use_io_binding=True is not supported. 
Please either pass use_cache=True, use_io_binding=True (default), or use_cache=False, use_io_binding=False.') self.use_merged = use_merged self.encoder = self._initialize_encoder(encoder_session) self.encoder_model_path = Path(encoder_session._model_path) self.encoder_model_name = self.encoder_model_path.name self.decoder = ORTDecoderForSeq2Seq(decoder_session, self) self.decoder_model_path = Path(decoder_session._model_path) self.decoder_model_name = self.decoder_model_path.name self.decoder_with_past = None self.decoder_with_past_model_path = None self.decoder_with_past_model_name = None if self.use_cache is True and self.use_merged is False: self.decoder_with_past = ORTDecoderForSeq2Seq(decoder_with_past_session, self) self.decoder_with_past_model_path = Path(decoder_with_past_session._model_path) self.decoder_with_past_model_name = self.decoder_with_past_model_path.name if generation_config is None: generation_config = GenerationConfig.from_model_config(config) self.generation_config = generation_config @abstractmethod def _initialize_encoder(self, session: ort.InferenceSession) -> ORTEncoder: pass @staticmethod def load_model(encoder_path: Union[str, Path], decoder_path: Union[str, Path], decoder_with_past_path: Optional[Union[str, Path]]=None, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict]=None): encoder_session = ORTModel.load_model(encoder_path, provider, session_options, provider_options) decoder_session = ORTModel.load_model(decoder_path, provider, session_options, provider_options) decoder_with_past_session = None if decoder_with_past_path is not None: decoder_with_past_session = ORTModel.load_model(decoder_with_past_path, provider, session_options, provider_options) return (encoder_session, decoder_session, decoder_with_past_session) def _save_pretrained(self, save_directory: Union[str, Path]): save_directory = Path(save_directory) src_paths = [Path(path) for path in self.onnx_paths] dst_paths = [save_directory / path.name for path in src_paths] (src_paths, dst_paths) = _get_external_data_paths(src_paths, dst_paths) for (src_path, dst_path) in zip(src_paths, dst_paths): shutil.copyfile(src_path, dst_path) self.generation_config.save_pretrained(save_directory) @classmethod def _from_pretrained(cls, model_id: Union[str, Path], config: 'PretrainedConfig', use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, force_download: bool=False, cache_dir: str=HUGGINGFACE_HUB_CACHE, encoder_file_name: str=ONNX_ENCODER_NAME, decoder_file_name: str=ONNX_DECODER_NAME, decoder_with_past_file_name: str=ONNX_DECODER_WITH_PAST_NAME, subfolder: str='', local_files_only: bool=False, use_cache: bool=True, use_merged: Optional[bool]=None, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, **kwargs): if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token model_path = Path(model_id) if use_cache is False: if use_merged is True: raise ValueError('The parameters combination use_cache=False, use_merged=True is not supported. To use a merged decoder, past key values must be used.') use_merged = False decoder_merged_path = None if use_merged is not False: try: decoder_merged_path = ORTModelForConditionalGeneration.infer_onnx_filename(model_id, [DECODER_MERGED_ONNX_FILE_PATTERN], argument_name=None, subfolder=subfolder, token=token, revision=revision) use_merged = True decoder_path = decoder_merged_path except FileNotFoundError as e: if use_merged is True: raise FileNotFoundError(f'The parameter `use_merged=True` was passed to ORTModelForCausalLM.from_pretrained() but no ONNX file for a merged decoder could be found in {str(Path(model_id, subfolder))}, with the error: {e}') use_merged = False decoder_without_past_path = None decoder_with_past_path = None if use_merged is False: if not validate_file_exists(model_id, decoder_file_name, subfolder=subfolder, revision=revision): decoder_without_past_path = ORTModelForConditionalGeneration.infer_onnx_filename(model_id, [DECODER_ONNX_FILE_PATTERN], 'decoder_file_name', subfolder=subfolder, token=token, revision=revision) else: decoder_without_past_path = model_path / subfolder / decoder_file_name decoder_path = decoder_without_past_path decoder_regular_onnx_filenames = ORTModelForConditionalGeneration._generate_regular_names_for_filename(ONNX_DECODER_NAME) if decoder_path.name not in decoder_regular_onnx_filenames: logger.warning(f'The ONNX file {decoder_path.name} is not a regular name used in optimum.onnxruntime that are {decoder_regular_onnx_filenames}, the {cls.__name__} might not behave as expected.') if use_cache is True and use_merged is False: if not validate_file_exists(model_id, decoder_with_past_file_name, subfolder=subfolder, revision=revision): try: decoder_with_past_path = ORTModelForConditionalGeneration.infer_onnx_filename(model_id, [DECODER_WITH_PAST_ONNX_FILE_PATTERN], 'decoder_with_past_file_name', subfolder=subfolder, token=token, revision=revision) except FileNotFoundError as e: raise FileNotFoundError(f'The parameter `use_cache=True` was passed to ORTModelForCausalLM.from_pretrained() but no ONNX file using past key values could be found in {str(Path(model_id, subfolder))}, with the error: {e}') else: decoder_with_past_path = model_path / subfolder / decoder_with_past_file_name decoder_path = decoder_without_past_path decoder_with_past_regular_onnx_filenames = ORTModelForConditionalGeneration._generate_regular_names_for_filename(ONNX_DECODER_WITH_PAST_NAME) if decoder_with_past_path.name not in decoder_with_past_regular_onnx_filenames: logger.warning(f'The ONNX file {decoder_with_past_path.name} is not a regular name used in optimum.onnxruntime that are {decoder_with_past_regular_onnx_filenames}, the {cls.__name__} might not behave as expected.') if not validate_file_exists(model_id, encoder_file_name, subfolder=subfolder, revision=revision): encoder_path = ORTModelForConditionalGeneration.infer_onnx_filename(model_id, [ENCODER_ONNX_FILE_PATTERN], 'encoder_file_name', subfolder=subfolder, token=token, revision=revision) else: encoder_path = model_path / subfolder / encoder_file_name encoder_regular_onnx_filenames = 
ORTModelForConditionalGeneration._generate_regular_names_for_filename(ONNX_ENCODER_NAME) if encoder_path.name not in encoder_regular_onnx_filenames: logger.warning(f'The ONNX file {encoder_path.name} is not a regular name used in optimum.onnxruntime, the ORTModelForConditionalGeneration might not behave as expected.') preprocessors = None if model_path.is_dir(): new_model_save_dir = model_path preprocessors = maybe_load_preprocessors(model_id) else: attribute_name_to_filename = {'last_encoder_model_name': encoder_path.name, 'last_decoder_model_name': decoder_path.name if use_merged is False else None, 'last_decoder_with_past_model_name': decoder_with_past_path.name if use_merged is False and use_cache is True else None, 'last_decoder_merged_name': decoder_merged_path.name if use_merged is True else None} paths = {} for (attr_name, filename) in attribute_name_to_filename.items(): if filename is None: continue model_cache_path = hf_hub_download(repo_id=model_id, subfolder=subfolder, filename=filename, token=token, revision=revision, cache_dir=cache_dir, force_download=force_download, local_files_only=local_files_only) try: hf_hub_download(repo_id=model_id, subfolder=subfolder, filename=filename + '_data', token=token, revision=revision, cache_dir=cache_dir, force_download=force_download, local_files_only=local_files_only) except EntryNotFoundError: pass paths[attr_name] = Path(model_cache_path).name new_model_save_dir = Path(model_cache_path).parent preprocessors = maybe_load_preprocessors(model_id, subfolder=subfolder) if use_merged is True: decoder_path = new_model_save_dir / paths['last_decoder_merged_name'] decoder_merged_path = new_model_save_dir / paths['last_decoder_merged_name'] else: decoder_path = new_model_save_dir / paths['last_decoder_model_name'] decoder_without_past_path = new_model_save_dir / paths['last_decoder_model_name'] if use_cache is True: decoder_with_past_path = new_model_save_dir / paths['last_decoder_with_past_model_name'] encoder_path = new_model_save_dir / paths['last_encoder_model_name'] ort_inference_sessions = cls.load_model(encoder_path=encoder_path, decoder_path=decoder_path, decoder_with_past_path=None if use_merged is True or use_cache is False else decoder_with_past_path, provider=provider, session_options=session_options, provider_options=provider_options) if model_save_dir is None: model_save_dir = new_model_save_dir generation_config = None try: generation_config = GenerationConfig.from_pretrained(model_id, cache_dir=cache_dir, force_download=force_download, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder) except OSError: logger.info('Generation config file not found, using a generation config created from the model config.') onnx_paths = [encoder_path] if use_merged is False: onnx_paths.append(decoder_without_past_path) if use_cache is True: onnx_paths.append(decoder_with_past_path) else: onnx_paths.append(decoder_merged_path) return cls(*ort_inference_sessions[:2], config, onnx_paths=onnx_paths, use_cache=use_cache, decoder_with_past_session=ort_inference_sessions[2], use_io_binding=use_io_binding, model_save_dir=model_save_dir, preprocessors=preprocessors, generation_config=generation_config) @classmethod def _from_transformers(cls, model_id: str, config: 'PretrainedConfig', use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None, revision: str='main', force_download: bool=True, cache_dir: str=HUGGINGFACE_HUB_CACHE, subfolder: str='', local_files_only: bool=False, 
trust_remote_code: bool=False, use_cache: bool=True, use_merged: bool=False, provider: str='CPUExecutionProvider', session_options: Optional[ort.SessionOptions]=None, provider_options: Optional[Dict[str, Any]]=None, use_io_binding: Optional[bool]=None, task: Optional[str]=None) -> 'ORTModelForConditionalGeneration': if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token if use_cache is False and use_merged is True: raise ValueError('The incompatible arguments use_cache=False, use_merged=True were passed to ORTModelForConditionalGeneration.from_pretrained(). Please pass either use_cache=False, use_merged=False to disable past key value caching, or use_cache=True, use_merged=False to disable the merging of the decoder not using / using past key and value.') if task is None: task = cls._auto_model_to_task(cls.auto_model_class) if use_cache is True: task = task + '-with-past' save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) main_export(model_name_or_path=model_id, output=save_dir_path, task=task, do_validation=False, no_post_process=not use_merged, subfolder=subfolder, revision=revision, cache_dir=cache_dir, token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code) config.save_pretrained(save_dir_path) maybe_save_preprocessors(model_id, save_dir_path, src_subfolder=subfolder) return cls._from_pretrained(save_dir_path, config, use_cache=use_cache, use_merged=use_merged, provider=provider, session_options=session_options, provider_options=provider_options, use_io_binding=use_io_binding, model_save_dir=save_dir) @property def dtype(self) -> torch.dtype: return self.encoder.dtype or self.decoder.dtype def to(self, device: Union[torch.device, str, int]): (device, provider_options) = parse_device(device) if device.type == 'cuda' and self.providers[0] == 'TensorrtExecutionProvider': return self provider = get_provider_for_device(device) validate_provider_availability(provider) self.encoder.session.set_providers([provider], provider_options=[provider_options]) self.decoder.session.set_providers([provider], provider_options=[provider_options]) if self.decoder_with_past is not None: self.decoder_with_past.session.set_providers([provider], provider_options=[provider_options]) self.providers = self.encoder.session.get_providers() self._device = device return self @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForSeq2SeqLM(ORTModelForConditionalGeneration, GenerationMixin): auto_model_class = AutoModelForSeq2SeqLM main_input_name = 'input_ids' def __init__(self, encoder_session: ort.InferenceSession, decoder_session: ort.InferenceSession, config: 'PretrainedConfig', onnx_paths: List[str], decoder_with_past_session: Optional[ort.InferenceSession]=None, use_cache: bool=True, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, preprocessors: Optional[List]=None, generation_config: Optional[GenerationConfig]=None, **kwargs): super().__init__(encoder_session, decoder_session, config, onnx_paths, decoder_with_past_session, use_cache, use_io_binding, model_save_dir, preprocessors, generation_config, **kwargs) if config.model_type == 'encoder-decoder': self.encoder.normalized_config = 
NormalizedConfigManager.get_normalized_config_class(config.encoder.model_type)(config.encoder) self.decoder.normalized_config = NormalizedConfigManager.get_normalized_config_class(config.decoder.model_type)(config.decoder) if self.decoder_with_past is not None: self.decoder_with_past.normalized_config = NormalizedConfigManager.get_normalized_config_class(config.decoder.model_type)(config.decoder) def _initialize_encoder(self, session: ort.InferenceSession) -> ORTEncoder: return ORTEncoder(session, self) @add_start_docstrings_to_model_forward(SEQ2SEQ_ONNX_MODEL_DOCSTRING + TRANSLATION_EXAMPLE.format(processor_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForSeq2SeqLM', checkpoint='optimum/t5-small')) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, labels: Optional[torch.LongTensor]=None, **kwargs) -> Seq2SeqLMOutput: if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask) model = self.decoder if past_key_values is None or not self.use_cache or self.use_merged else self.decoder_with_past decoder_outputs = model(input_ids=decoder_input_ids, past_key_values=past_key_values, encoder_hidden_states=encoder_outputs.last_hidden_state, encoder_attention_mask=attention_mask, labels=labels) return Seq2SeqLMOutput(loss=decoder_outputs.get('loss', None), logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, token_type_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs) -> Dict: if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'decoder_input_ids': input_ids, 'past_key_values': past_key_values, 'encoder_outputs': encoder_outputs, 'attention_mask': attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def get_encoder(self) -> ORTEncoder: return self.encoder @staticmethod def _reorder_cache(past, beam_idx) -> Tuple[Tuple[torch.FloatTensor]]: reordered_past = () for layer_past in past: reordered_past += (tuple((past_state.index_select(0, beam_idx) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForSpeechSeq2Seq(ORTModelForConditionalGeneration, GenerationMixin): auto_model_class = AutoModelForSpeechSeq2Seq main_input_name = 'input_features' def __init__(self, encoder_session: ort.InferenceSession, decoder_session: ort.InferenceSession, config: 'PretrainedConfig', onnx_paths: List[str], decoder_with_past_session: Optional[ort.InferenceSession]=None, use_cache: bool=True, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, preprocessors: Optional[List]=None, generation_config: Optional[GenerationConfig]=None, **kwargs): super().__init__(encoder_session=encoder_session, decoder_session=decoder_session, config=config, onnx_paths=onnx_paths, decoder_with_past_session=decoder_with_past_session, 
use_cache=use_cache, use_io_binding=use_io_binding, model_save_dir=model_save_dir, preprocessors=preprocessors, generation_config=generation_config, **kwargs) MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES['ort_speechseq2seq'] = self.__class__.__name__ def _initialize_encoder(self, session: ort.InferenceSession) -> ORTEncoder: return ORTEncoderForSpeech(session, self) @add_start_docstrings_to_model_forward(SPEECH_SEQ2SEQ_ONNX_MODEL_DOCSTRING + AUTOMATIC_SPEECH_RECOGNITION_EXAMPLE.format(processor_class=_PROCESSOR_FOR_DOC, model_class='ORTModelForSpeechSeq2Seq', checkpoint='optimum/whisper-tiny.en')) def forward(self, input_features: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, labels: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Seq2SeqLMOutput: if encoder_outputs is None: encoder_outputs = self.encoder(input_features=input_features, attention_mask=attention_mask) model = self.decoder if past_key_values is None or not self.use_cache or self.use_merged else self.decoder_with_past decoder_outputs = model(input_ids=decoder_input_ids, past_key_values=past_key_values, encoder_hidden_states=encoder_outputs.last_hidden_state, encoder_attention_mask=attention_mask, cache_position=cache_position, labels=labels) return Seq2SeqLMOutput(loss=decoder_outputs.get('loss', None), logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return {'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def get_encoder(self) -> ORTEncoder: return self.encoder @staticmethod def _reorder_cache(past, beam_idx) -> Tuple[Tuple[torch.FloatTensor]]: reordered_past = () for layer_past in past: reordered_past += (tuple((past_state.index_select(0, beam_idx) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past @classmethod def _from_pretrained(cls, model_id: Union[str, Path], config: 'PretrainedConfig', **kwargs): if 'WhisperForConditionalGeneration' in config.architectures: return _ORTModelForWhisper._from_pretrained(model_id, config, **kwargs) else: return super()._from_pretrained(model_id, config, **kwargs) class _ORTModelForWhisper(ORTModelForSpeechSeq2Seq, WhisperForConditionalGeneration): auto_model_class = WhisperForConditionalGeneration prepare_inputs_for_generation = WhisperForConditionalGeneration.prepare_inputs_for_generation generate = WhisperForConditionalGeneration.generate @classmethod def _from_pretrained(cls, model_id: Union[str, Path], config: 'PretrainedConfig', **kwargs): return super(ORTModelForSpeechSeq2Seq, cls)._from_pretrained(model_id, config, **kwargs) class DummyWhisperModel: def __init__(self): self.encoder = self.Encoder() class Encoder: def __init__(self): self.conv1 = self.Conv(stride=(1,)) self.conv2 = self.Conv(stride=(2,)) class Conv: def __init__(self, stride): self.stride = stride 
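# Usage sketch (illustrative, not part of the original sources): transcribing audio with the
# ORTModelForSpeechSeq2Seq class defined above. The checkpoint name mirrors the docstring example,
# and the silent dummy waveform is a stand-in for real 16 kHz audio; both are assumptions.
import numpy as np
from transformers import AutoProcessor
from optimum.onnxruntime import ORTModelForSpeechSeq2Seq

processor = AutoProcessor.from_pretrained("optimum/whisper-tiny.en")
ort_whisper = ORTModelForSpeechSeq2Seq.from_pretrained("optimum/whisper-tiny.en")
dummy_waveform = np.zeros(16000, dtype=np.float32)  # one second of silence sampled at 16 kHz
inputs = processor(dummy_waveform, sampling_rate=16000, return_tensors="pt")
predicted_ids = ort_whisper.generate(inputs.input_features)
print(processor.batch_decode(predicted_ids, skip_special_tokens=True))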
model = DummyWhisperModel() @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForVision2Seq(ORTModelForConditionalGeneration, GenerationMixin): auto_model_class = AutoModelForVision2Seq main_input_name = 'pixel_values' def __init__(self, encoder_session: ort.InferenceSession, decoder_session: ort.InferenceSession, config: 'PretrainedConfig', onnx_paths: List[str], decoder_with_past_session: Optional[ort.InferenceSession]=None, use_cache: bool=True, use_io_binding: Optional[bool]=None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]]=None, preprocessors: Optional[List]=None, generation_config: Optional[GenerationConfig]=None, **kwargs): if config.decoder.model_type == 'gpt2': self.no_cross_attention_cache = True super().__init__(encoder_session, decoder_session, config, onnx_paths, decoder_with_past_session, use_cache, use_io_binding, model_save_dir, preprocessors, generation_config, **kwargs) self.encoder.normalized_config = NormalizedConfigManager.get_normalized_config_class(config.encoder.model_type)(config.encoder) self.decoder.normalized_config = NormalizedConfigManager.get_normalized_config_class(config.decoder.model_type)(config.decoder) if self.decoder_with_past is not None: self.decoder_with_past.normalized_config = NormalizedConfigManager.get_normalized_config_class(config.decoder.model_type)(config.decoder) def _initialize_encoder(self, session: ort.InferenceSession) -> ORTEncoder: return ORTEncoderForVisionEncoderDecoder(session, self) @add_start_docstrings_to_model_forward(VISION_ENCODER_DECODER_SEQ2SEQ_ONNX_MODEL_DOCSTRING + IMAGE_TO_TEXT_EXAMPLE.format(processor_class=_IMAGE_PROCESSER_FOR_DOC, tokenizer_class=_TOKENIZER_FOR_DOC, model_class='ORTModelForVision2Seq', checkpoint='nlpconnect/vit-gpt2-image-captioning')) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, labels: Optional[torch.LongTensor]=None, **kwargs) -> Seq2SeqLMOutput: if encoder_outputs is None: encoder_outputs = self.encoder(pixel_values=pixel_values) model = self.decoder if past_key_values is None or not self.use_cache or self.use_merged else self.decoder_with_past decoder_outputs = model(input_ids=decoder_input_ids, past_key_values=past_key_values, encoder_hidden_states=encoder_outputs.last_hidden_state, labels=labels) return Seq2SeqLMOutput(loss=decoder_outputs.get('loss', None), logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs) -> Dict: if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'decoder_input_ids': input_ids, 'past_key_values': past_key_values, 'encoder_outputs': encoder_outputs, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def get_encoder(self) -> ORTEncoder: return self.encoder @staticmethod def _reorder_cache(past, beam_idx) -> Tuple[Tuple[torch.FloatTensor]]: reordered_past = () for layer_past in past: reordered_past += (tuple((past_state.index_select(0, beam_idx) for past_state 
in layer_past[:2])) + layer_past[2:],) return reordered_past @add_end_docstrings(ONNX_MODEL_END_DOCSTRING) class ORTModelForPix2Struct(ORTModelForConditionalGeneration, GenerationMixin): auto_model_class = Pix2StructForConditionalGeneration main_input_name = 'flattened_patches' def _initialize_encoder(self, session: ort.InferenceSession) -> ORTEncoder: return ORTEncoderForPix2Struct(session, self) @add_start_docstrings_to_model_forward(PIX2STRUCT_ONNX_MODEL_DOCSTRING + PIX2STRUCT_EXAMPLE.format(processor_class=_PROCESSOR_FOR_DOC, model_class='ORTModelForPix2Struct', checkpoint='google/pix2struct-ai2d-base')) def forward(self, flattened_patches: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, labels: Optional[torch.LongTensor]=None, **kwargs) -> Seq2SeqLMOutput: if encoder_outputs is None: encoder_outputs = self.encoder(flattened_patches=flattened_patches, attention_mask=attention_mask) if isinstance(attention_mask, torch.Tensor): attention_mask = attention_mask.to(torch.int64) else: attention_mask = attention_mask.astype(np.int64) model = self.decoder if past_key_values is None or not self.use_cache or self.use_merged else self.decoder_with_past decoder_outputs = model(input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, encoder_hidden_states=encoder_outputs.last_hidden_state, encoder_attention_mask=attention_mask, labels=labels) return Seq2SeqLMOutput(loss=decoder_outputs.get('loss', None), logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values) def prepare_inputs_for_generation(self, input_ids, flattened_patches: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, past_key_values=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs) -> Dict: if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if decoder_attention_mask is None: decoder_attention_mask = torch.ones_like(input_ids).to(input_ids.device) return {'flattened_patches': flattened_patches, 'decoder_input_ids': input_ids, 'past_key_values': past_key_values, 'encoder_outputs': encoder_outputs, 'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def get_encoder(self) -> ORTEncoder: return self.encoder @staticmethod def _reorder_cache(past, beam_idx) -> Tuple[Tuple[torch.FloatTensor]]: return ORTModelForSeq2SeqLM._reorder_cache(past, beam_idx) # File: optimum-main/optimum/onnxruntime/models/bloom.py from typing import TYPE_CHECKING, Tuple if TYPE_CHECKING: import torch def bloom_convert_to_standard_cache(past_key_value: Tuple[Tuple['torch.Tensor', 'torch.Tensor']], batch_size: int) -> Tuple[Tuple['torch.Tensor', 'torch.Tensor']]: (batch_size_times_num_heads, head_dim, seq_length) = past_key_value[0][0].shape num_heads = batch_size_times_num_heads // batch_size return
tuple(((layer_past[0].view(batch_size, num_heads, head_dim, seq_length), layer_past[1].view(batch_size, num_heads, seq_length, head_dim)) for layer_past in past_key_value)) def bloom_convert_to_bloom_cache(past_key_value: Tuple[Tuple['torch.Tensor', 'torch.Tensor']]) -> Tuple[Tuple['torch.Tensor', 'torch.Tensor']]: (batch_size, num_heads, head_dim, seq_length) = past_key_value[0][0].shape batch_size_times_num_heads = batch_size * num_heads return tuple(((layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length), layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim)) for layer_past in past_key_value)) # File: optimum-main/optimum/onnxruntime/optimization.py """""" import gc import os from pathlib import Path from typing import TYPE_CHECKING, Dict, List, Optional, Union import onnx from onnx import load_model from transformers.models.auto.configuration_auto import AutoConfig from onnxruntime.transformers.onnx_model_bert import BertOnnxModel from onnxruntime.transformers.optimizer import optimize_model from ..onnx.utils import check_model_uses_external_data from ..utils import CONFIG_NAME, NormalizedConfigManager, logging from ..utils.save_utils import maybe_save_preprocessors from .configuration import OptimizationConfig, ORTConfig from .modeling_decoder import ORTModelForCausalLM from .modeling_ort import ORTModel from .modeling_seq2seq import ORTModelForConditionalGeneration from .utils import ONNX_WEIGHTS_NAME, ORTConfigManager if TYPE_CHECKING: from transformers import PretrainedConfig logger = logging.get_logger() class ORTOptimizer: def __init__(self, onnx_model_path: List[os.PathLike], config: 'PretrainedConfig', from_ortmodel: bool=False): super().__init__() self.onnx_model_path = onnx_model_path self.config = config self.model_type = self.config.model_type self.from_ortmodel = from_ortmodel try: self.normalized_config = NormalizedConfigManager.get_normalized_config_class(self.model_type)(self.config) except KeyError: raise NotImplementedError(f'Tried to use ORTOptimizer for the model type {self.model_type}, but it is not available yet. Please open an issue or submit a PR at https://github.com/huggingface/optimum.') @classmethod def from_pretrained(cls, model_or_path: Union[str, os.PathLike, ORTModel], file_names: Optional[List[str]]=None) -> 'ORTOptimizer': onnx_model_path = [] config = None if isinstance(model_or_path, ORTModel): from_ortmodel = True if isinstance(model_or_path, ORTModelForConditionalGeneration): onnx_model_path += [model_or_path.encoder_model_path, model_or_path.decoder_model_path] if model_or_path.use_cache: onnx_model_path.append(model_or_path.decoder_with_past_model_path) elif isinstance(model_or_path, ORTModelForCausalLM) and model_or_path.use_merged: raise NotImplementedError('ORTOptimizer does not support ORTModelForCausalLM models when without/with past models are merged. Please re-export your model. 
This can be done by using the optimum-cli ONNX export tool or `ORTModelForCausalLM.from_pretrained(..., export=True, use_merged=False)`.') else: onnx_model_path.append(model_or_path.model_path) config = model_or_path.config elif os.path.isdir(model_or_path): from_ortmodel = False file_names = [ONNX_WEIGHTS_NAME] if file_names is None else file_names model_or_path = Path(model_or_path) if CONFIG_NAME not in os.listdir(model_or_path): raise ValueError(f'The local directory does not contain the configuration file {CONFIG_NAME}.') config = AutoConfig.from_pretrained(model_or_path) for file_name in file_names: onnx_model_path.append(model_or_path.joinpath(file_name)) else: raise ValueError(f'Unable to load the model from {model_or_path}.') return cls(onnx_model_path, config=config, from_ortmodel=from_ortmodel) def optimize(self, optimization_config: OptimizationConfig, save_dir: Union[str, os.PathLike], file_suffix: Optional[str]='optimized', use_external_data_format: Optional[bool]=None, one_external_file: bool=True): if use_external_data_format is not None: logger.warning('The argument use_external_data_format in the ORTOptimizer.optimize() method is deprecated and will be removed in optimum 2.0.') save_dir = Path(save_dir) save_dir.mkdir(parents=True, exist_ok=True) ORTConfigManager.check_optimization_supported_model(self.model_type, optimization_config) self.config.save_pretrained(save_dir) maybe_save_preprocessors(self.onnx_model_path[0].parent, save_dir) model_type = ORTConfigManager.get_model_ort_type(self.config.model_type) optimization_options = optimization_config.create_fusion_options(model_type) logger.info('Optimizing model...') model_uses_external_data = False for model_path in self.onnx_model_path: onnx_model = onnx.load(str(model_path), load_external_data=False) if check_model_uses_external_data(onnx_model) is True: model_uses_external_data = True break del onnx_model gc.collect() ort_config = ORTConfig(optimization=optimization_config, use_external_data_format=model_uses_external_data, one_external_file=one_external_file) for model_path in self.onnx_model_path: suffix = f'_{file_suffix}' if file_suffix else '' output_path = save_dir.joinpath(f'{model_path.stem}{suffix}').with_suffix(model_path.suffix) try: optimizer = optimize_model(model_path.as_posix(), model_type, self.normalized_config.num_attention_heads, self.normalized_config.hidden_size, opt_level=optimization_config.optimization_level, optimization_options=optimization_options, use_gpu=optimization_config.optimize_for_gpu, only_onnxruntime=not optimization_config.enable_transformers_specific_optimizations) if optimization_config.fp16: if model_uses_external_data: optimizer.save_model_to_file(output_path.as_posix(), use_external_data_format=model_uses_external_data, all_tensors_to_one_file=one_external_file) optimizer.model = output_path.as_posix() optimizer.convert_float_to_float16(use_symbolic_shape_infer=not optimization_config.disable_shape_inference, keep_io_types=True) except Exception as e: if 'Incomplete symbolic shape inference' in str(e): err = RuntimeError(f'{str(e)}. 
Try to set `disable_shape_inference=True` in your optimization configuration.') raise err from e raise optimizer.save_model_to_file(output_path.as_posix(), use_external_data_format=model_uses_external_data, all_tensors_to_one_file=one_external_file) if Path(model_path.as_posix() + '_data').is_file() and self.from_ortmodel is False: os.remove(model_path.as_posix() + '_data') self.config.save_pretrained(save_dir) ort_config.save_pretrained(save_dir) logger.info(f'Optimized model saved at: {save_dir} (external data format: {model_uses_external_data}; saved all tensors to one file: {one_external_file})') return Path(save_dir) @staticmethod def get_fused_operators(onnx_model_path: Union[str, os.PathLike]) -> Dict[str, int]: onnx_optimized_model = BertOnnxModel(load_model(onnx_model_path)) fused_operator = onnx_optimized_model.get_fused_operator_statistics() logger.info(f"The following operators were fused: {', '.join([k for (k, v) in fused_operator.items() if v > 0])}") return {k: v for (k, v) in fused_operator.items() if v > 0} @staticmethod def get_nodes_number_difference(onnx_model_path: Union[str, os.PathLike], onnx_optimized_model_path: Union[str, os.PathLike]) -> int: onnx_model = BertOnnxModel(load_model(onnx_model_path)) onnx_optimized_model = BertOnnxModel(load_model(onnx_optimized_model_path)) nodes_number_onnx_model = len(onnx_model.nodes()) nodes_number_onnx_optimized_model = len(onnx_optimized_model.nodes()) difference_nodes_number = nodes_number_onnx_model - nodes_number_onnx_optimized_model logger.info(f'There are {nodes_number_onnx_model} nodes before optimization and {nodes_number_onnx_optimized_model} nodes after. The number of nodes removed is {difference_nodes_number}.') return difference_nodes_number @staticmethod def get_operators_difference(onnx_model_path: Union[str, os.PathLike], onnx_optimized_model_path: Union[str, os.PathLike]) -> Dict[str, int]: onnx_model = BertOnnxModel(load_model(onnx_model_path)) onnx_optimized_model = BertOnnxModel(load_model(onnx_optimized_model_path)) def nodes_difference_given_type(op_type): onnx_model_nodes_with_op_type = len(onnx_model.get_nodes_by_op_type(op_type)) onnx_optimized_model_nodes_with_op_type = len(onnx_optimized_model.get_nodes_by_op_type(op_type)) return onnx_model_nodes_with_op_type - onnx_optimized_model_nodes_with_op_type op_types = set() for model in [onnx_model, onnx_optimized_model]: for node in model.nodes(): op_types.add(node.op_type) operators_difference = {op_type: nodes_difference_given_type(op_type) for op_type in op_types} return {k: v for (k, v) in operators_difference.items() if v != 0}
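# Usage sketch (illustrative, not part of the original sources): graph optimization with the
# ORTOptimizer defined above. The checkpoint and output directory are placeholders, and
# AutoOptimizationConfig.O2() corresponds to the -O2 level exposed by the CLI further below.
from optimum.onnxruntime import AutoOptimizationConfig, ORTModelForSequenceClassification, ORTOptimizer

ort_model = ORTModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english", export=True
)
optimizer = ORTOptimizer.from_pretrained(ort_model)  # also accepts a directory containing ONNX files
optimizer.optimize(optimization_config=AutoOptimizationConfig.O2(), save_dir="distilbert_sst2_onnx_o2")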
# File: optimum-main/optimum/onnxruntime/preprocessors/passes/excluders.py from typing import Set, Tuple from onnx import ModelProto from onnxruntime.transformers.onnx_model import OnnxModel from .. import PreprocessorPass class ExcludeNodeFollowedBy(PreprocessorPass): def __init__(self, operator_type_to_exclude: str, following_operator_type: str): super().__init__() self.operator_type_to_exclude = operator_type_to_exclude self.following_operator_type = following_operator_type def __call__(self, _: ModelProto, model: OnnxModel) -> Tuple[Set[str], Set[str]]: candidate_nodes_to_exclude = {candidate_output: candidate.name for candidate in model.get_nodes_by_op_type(self.operator_type_to_exclude) for candidate_output in candidate.output} nodes_of_following_type = {node_input: node.name for node in model.get_nodes_by_op_type(self.following_operator_type) for node_input in node.input} to_exclude = set(candidate_nodes_to_exclude.keys()).intersection(nodes_of_following_type.keys()) nodes_to_exclude = {candidate_nodes_to_exclude[node] for node in to_exclude} return (set(), nodes_to_exclude) class ExcludeNodeAfter(PreprocessorPass): def __init__(self, parent_operator_type: str, operator_type_to_exclude: str): super().__init__() self.parent_operator_type = parent_operator_type self.operator_type_to_exclude = operator_type_to_exclude def __call__(self, graph: ModelProto, model: OnnxModel) -> Tuple[Set[str], Set[str]]: candidate_nodes_to_exclude = {candidate_input: candidate.name for candidate in model.get_nodes_by_op_type(self.operator_type_to_exclude) for candidate_input in candidate.input} parent_node = {node_output: node.name for node in model.get_nodes_by_op_type(self.parent_operator_type) for node_output in node.output} to_exclude = set(candidate_nodes_to_exclude.keys()).intersection(parent_node.keys()) nodes_to_exclude = {candidate_nodes_to_exclude[node] for node in to_exclude} return (set(), nodes_to_exclude) # File: optimum-main/optimum/onnxruntime/preprocessors/passes/fully_connected.py from typing import Set, Tuple from onnx import ModelProto from onnxruntime.transformers.onnx_model import OnnxModel from .. import PreprocessorPass class IncludeFullyConnectedNodes(PreprocessorPass): def __init__(self): super().__init__() def __call__(self, graph: ModelProto, model: OnnxModel) -> Tuple[Set[str], Set[str]]: fc_subgraphs = [] for add_node in model.get_nodes_by_op_type('Add'): fc_components = model.match_parent_path(add_node, ['MatMul'], [1]) if fc_components is not None: fc_components.append(add_node) fc_subgraphs.append(fc_components) fc_components = {node.name for fc in fc_subgraphs for node in fc} return (fc_components, set()) # File: optimum-main/optimum/onnxruntime/preprocessors/passes/gelu.py from typing import Set, Tuple from onnx import ModelProto from onnxruntime.transformers.onnx_model import OnnxModel from .. import PreprocessorPass class ExcludeGeLUNodes(PreprocessorPass): def __init__(self): super().__init__() def __call__(self, graph: ModelProto, model: OnnxModel) -> Tuple[Set[str], Set[str]]: gelu_subgraphs = [] for mul_node in model.get_nodes_by_op_type('Mul'): gelu_components = model.match_parent_path(mul_node, ['Mul', 'Add', 'Erf', 'Div'], [0, 1, 0, 0]) if gelu_components is not None: gelu_components.append(mul_node) gelu_subgraphs.append(gelu_components) gl_components = (node.name for gl in gelu_subgraphs for node in gl) return (set(), set(gl_components)) # File: optimum-main/optimum/onnxruntime/preprocessors/passes/layernorm.py from typing import Set, Tuple from onnx import ModelProto from onnxruntime.transformers.onnx_model import OnnxModel from ..
import PreprocessorPass class ExcludeLayerNormNodes(PreprocessorPass): def __init__(self): super().__init__() def __call__(self, graph: ModelProto, model: OnnxModel) -> Tuple[Set[str], Set[str]]: layer_norm_subgraphs = [] for add_node in model.get_nodes_by_op_type('Add'): layer_norm_components = model.match_parent_path(add_node, ['Mul', 'Div', 'Sqrt', 'Add', 'ReduceMean', 'Pow', 'Sub', 'ReduceMean'], [0, 0, 1, 0, 0, 0, 0, 1]) if layer_norm_components is not None: layer_norm_components.append(add_node) layer_norm_subgraphs.append(layer_norm_components) ln_components = (node.name for ln in layer_norm_subgraphs for node in ln) return (set(), set(ln_components)) # File: optimum-main/optimum/onnxruntime/preprocessors/quantization.py from abc import ABC, abstractmethod from logging import getLogger from os import PathLike from pathlib import Path from typing import Optional, Set, Tuple, Union from onnx import ModelProto, load_model from onnxruntime.transformers.onnx_model import OnnxModel LOGGER = getLogger('GraphWalker') class PreprocessorPass(ABC): def __init__(self): self._logger = LOGGER @abstractmethod def __call__(self, graph: ModelProto, model: OnnxModel) -> Tuple[Optional[Set[str]], Optional[Set[str]]]: raise NotImplementedError() class QuantizationPreprocessor: __slots__ = ('_passes',) def __init__(self): self._passes = [] def from_config(self, config): pass def register_pass(self, target: PreprocessorPass): if target not in self._passes: self._passes.append(target) def collect(self, model_or_path: Union[str, PathLike, Path, bytes]) -> Tuple[Set[str], Set[str]]: (global_nodes_to_quantize, global_nodes_to_exclude) = (set(), set()) graph = load_model(model_or_path.as_posix() if isinstance(model_or_path, Path) else model_or_path) model = OnnxModel(graph) for walking_pass in self._passes: (nodes_to_quantize, nodes_to_exclude) = walking_pass(graph, model) if nodes_to_quantize is not None: global_nodes_to_quantize.update(nodes_to_quantize) if nodes_to_exclude is not None: global_nodes_to_exclude.update(nodes_to_exclude) global_nodes_to_quantize = global_nodes_to_quantize - global_nodes_to_exclude return (global_nodes_to_quantize, global_nodes_to_exclude) # File: optimum-main/optimum/onnxruntime/quantization.py """""" import logging import os import warnings from collections import defaultdict from pathlib import Path from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union import onnx from datasets import Dataset, load_dataset from packaging.version import Version, parse from transformers import AutoConfig from onnxruntime import __version__ as ort_version from onnxruntime.quantization import CalibrationDataReader, QuantFormat, QuantizationMode, QuantType from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer from onnxruntime.quantization.qdq_quantizer import QDQQuantizer from ..quantization_base import OptimumQuantizer from ..utils.save_utils import maybe_save_preprocessors from . 
import ORTQuantizableOperator from .configuration import CalibrationConfig, ORTConfig, QuantizationConfig from .modeling_ort import ORTModel from .modeling_seq2seq import ORTModelForConditionalGeneration from .preprocessors import QuantizationPreprocessor if TYPE_CHECKING: from transformers import PretrainedConfig LOGGER = logging.getLogger(__name__) class ORTCalibrationDataReader(CalibrationDataReader): __slots__ = ['batch_size', 'dataset', '_dataset_iter'] def __init__(self, dataset: Dataset, batch_size: int=1): if dataset is None: raise ValueError('Provided dataset is None.') if batch_size <= 0: raise ValueError(f'Provided batch_size should be >= 1 (got: {batch_size}).') self.dataset = dataset self.batch_size = batch_size self._dataset_iter = iter(self.dataset) def get_next(self): featurized_samples = None try: if self.batch_size == 1: featurized_samples = {key: [value] for (key, value) in next(self._dataset_iter).items()} else: featurized_samples = defaultdict(list) for _ in range(self.batch_size): sample = next(self._dataset_iter) for (name, value) in sample.items(): featurized_samples[name] += [value] except StopIteration: pass if featurized_samples is not None and len(featurized_samples) > 0: return featurized_samples return None class ORTQuantizer(OptimumQuantizer): def __init__(self, onnx_model_path: Path, config: Optional['PretrainedConfig']=None): super().__init__() self.onnx_model_path = onnx_model_path self.config = config if self.config is None: try: self.config = AutoConfig.from_pretrained(self.onnx_model_path.parent) except OSError: LOGGER.warning(f'Could not load the config for {self.onnx_model_path} automatically, this might make the quantized model harder to use because it will not be able to be loaded by an ORTModel without having to specify the configuration explicitly.') self._calibrator = None @classmethod def from_pretrained(cls, model_or_path: Union['ORTModel', str, Path], file_name: Optional[str]=None) -> 'ORTQuantizer': ort_quantizer_error_message = 'ORTQuantizer does not support multi-file quantization. Please create separate ORTQuantizer instances for each model/file, by passing the argument `file_name` to ORTQuantizer.from_pretrained().' if isinstance(model_or_path, str): model_or_path = Path(model_or_path) path = None if isinstance(model_or_path, ORTModelForConditionalGeneration): raise NotImplementedError(ort_quantizer_error_message) elif isinstance(model_or_path, Path) and file_name is None: onnx_files = list(model_or_path.glob('*.onnx')) if len(onnx_files) == 0: raise FileNotFoundError(f'Could not find any ONNX model file in {model_or_path}') elif len(onnx_files) > 1: raise RuntimeError(f'Found too many ONNX model files in {model_or_path}. 
{ort_quantizer_error_message}') file_name = onnx_files[0].name if isinstance(model_or_path, ORTModel): if path is None: path = Path(model_or_path.model._model_path) elif os.path.isdir(model_or_path): path = Path(model_or_path) / file_name else: raise ValueError(f'Unable to load model from {model_or_path}.') return cls(path) def fit(self, dataset: Dataset, calibration_config: CalibrationConfig, onnx_augmented_model_name: Union[str, Path]='augmented_model.onnx', operators_to_quantize: Optional[List[str]]=None, batch_size: int=1, use_external_data_format: bool=False, use_gpu: bool=False, force_symmetric_range: bool=False) -> Dict[str, Tuple[float, float]]: LOGGER.info(f'Using static quantization schema (dataset: {calibration_config.dataset_name}, method: {calibration_config.method})') self.partial_fit(dataset, calibration_config, onnx_augmented_model_name, operators_to_quantize, batch_size, use_external_data_format, use_gpu, force_symmetric_range) return self.compute_ranges() def partial_fit(self, dataset: Dataset, calibration_config: CalibrationConfig, onnx_augmented_model_name: Union[str, Path]='augmented_model.onnx', operators_to_quantize: Optional[List[str]]=None, batch_size: int=1, use_external_data_format: bool=False, use_gpu: bool=False, force_symmetric_range: bool=False): if calibration_config.method is not None: LOGGER.info(f'Creating calibrator: {calibration_config.method}({calibration_config})') self._calibrator = calibration_config.create_calibrator(onnx_model_path=self.onnx_model_path.as_posix(), use_external_data_format=use_external_data_format, augmented_model_name=onnx_augmented_model_name, operators_to_quantize=operators_to_quantize, force_symmetric_range=force_symmetric_range) if use_gpu: self._calibrator.set_execution_providers(execution_providers=['CUDAExecutionProvider']) LOGGER.info('Collecting tensor statistics...') reader = ORTCalibrationDataReader(dataset, batch_size) self._calibrator.collect_data(reader) def compute_ranges(self) -> Dict[str, Tuple[float, float]]: if self._calibrator is None: raise ValueError('Calibrator is None, please call the `partial_fit` or `fit` method at least once to compute ranges.') LOGGER.info('Computing calibration ranges') if parse(ort_version) >= Version('1.16.0'): return self._calibrator.compute_data() return self._calibrator.compute_range()
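# Usage sketch (illustrative, not part of the original sources): dynamic quantization with the
# ORTQuantizer defined in this file. The checkpoint and output directory are placeholders, and the
# avx512_vnni preset is one of the AutoQuantizationConfig presets also exposed by the CLI further below.
from optimum.onnxruntime import AutoQuantizationConfig, ORTModelForSequenceClassification, ORTQuantizer

ort_model = ORTModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english", export=True
)
quantizer = ORTQuantizer.from_pretrained(ort_model)
dqconfig = AutoQuantizationConfig.avx512_vnni(is_static=False, per_channel=False)
quantizer.quantize(quantization_config=dqconfig, save_dir="distilbert_sst2_onnx_quantized")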
def quantize(self, quantization_config: QuantizationConfig, save_dir: Union[str, Path], file_suffix: Optional[str]='quantized', calibration_tensors_range: Optional[Dict[str, Tuple[float, float]]]=None, use_external_data_format: bool=False, preprocessor: Optional[QuantizationPreprocessor]=None) -> Path: use_qdq = quantization_config.is_static and quantization_config.format == QuantFormat.QDQ save_dir = Path(save_dir) save_dir.mkdir(parents=True, exist_ok=True) if quantization_config.is_static and calibration_tensors_range is None: raise ValueError('Requested static quantization in the QuantizationConfig, but no calibration ranges were provided. Please run calibration first using the quantizer fit method, or use dynamic quantization.') if not quantization_config.is_static: if quantization_config.mode != QuantizationMode.IntegerOps: LOGGER.warning(f'ONNX Runtime dynamic quantization mode should be QuantizationMode.IntegerOps (got: {quantization_config.mode}).') if quantization_config.activations_dtype != QuantType.QUInt8: LOGGER.warning(f'ONNX Runtime dynamic quantization activations data type should be QuantType.QUInt8 (got: {quantization_config.activations_dtype}).') LOGGER.info(f"Creating {('static' if quantization_config.is_static else 'dynamic')} quantizer: {quantization_config}") if preprocessor is not None: LOGGER.info('Preprocessor detected, collecting nodes to include/exclude') (nodes_to_quantize, nodes_to_exclude) = preprocessor.collect(self.onnx_model_path) nodes_to_quantize.update(quantization_config.nodes_to_quantize) nodes_to_exclude.update(quantization_config.nodes_to_exclude) quantization_config.nodes_to_quantize = list(nodes_to_quantize) quantization_config.nodes_to_exclude = list(nodes_to_exclude) has_subgraphs = False onnx_model = onnx.load(Path(self.onnx_model_path).as_posix()) for node in onnx_model.graph.node: if node.op_type in ['If', 'Loop', 'Scan', 'SequenceMap']: has_subgraphs = True break if has_subgraphs: if quantization_config.is_static: raise NotImplementedError('Static quantization is currently not supported for models with subgraphs.') if parse(ort_version) == Version('1.16.0'): raise ValueError('ONNX Runtime version v1.16.0 is not compatible with quantization for models with subgraphs, please downgrade to 1.15.1 or upgrade to a higher version. Reference: https://github.com/microsoft/onnxruntime/pull/17651') quantizer_factory = QDQQuantizer if use_qdq else ONNXQuantizer quantizer_kwargs = {'model': onnx_model, 'static': quantization_config.is_static, 'per_channel': quantization_config.per_channel, 'mode': quantization_config.mode, 'weight_qType': quantization_config.weights_dtype, 'input_qType': quantization_config.activations_dtype, 'tensors_range': calibration_tensors_range, 'reduce_range': quantization_config.reduce_range, 'nodes_to_quantize': quantization_config.nodes_to_quantize, 'nodes_to_exclude': quantization_config.nodes_to_exclude, 'op_types_to_quantize': [operator.value if isinstance(operator, ORTQuantizableOperator) else operator for operator in quantization_config.operators_to_quantize], 'extra_options': {'WeightSymmetric': quantization_config.weights_symmetric, 'ActivationSymmetric': quantization_config.activations_symmetric, 'EnableSubgraph': has_subgraphs, 'ForceSymmetric': quantization_config.activations_symmetric and quantization_config.weights_symmetric, 'AddQDQPairToWeight': quantization_config.qdq_add_pair_to_weight, 'DedicatedQDQPair': quantization_config.qdq_dedicated_pair, 'QDQOpTypePerChannelSupportToAxis': quantization_config.qdq_op_type_per_channel_support_to_axis}} if use_qdq: quantizer_kwargs.pop('mode') if parse(ort_version) >= Version('1.18.0'): quantizer_kwargs.pop('static') if parse(ort_version) >= Version('1.13.0'): quantizer_kwargs['activation_qType'] = quantizer_kwargs.pop('input_qType') quantizer = quantizer_factory(**quantizer_kwargs) LOGGER.info('Quantizing model...') quantizer.quantize_model() suffix = f'_{file_suffix}' if file_suffix else '' quantized_model_path = save_dir.joinpath(f'{self.onnx_model_path.stem}{suffix}').with_suffix('.onnx') LOGGER.info(f'Saving quantized model at: {save_dir} (external data format: {use_external_data_format})')
quantizer.model.save_model_to_file(quantized_model_path.as_posix(), use_external_data_format) ort_config = ORTConfig(quantization=quantization_config, use_external_data_format=use_external_data_format) ort_config.save_pretrained(save_dir) if self.config is not None: self.config.save_pretrained(save_dir) maybe_save_preprocessors(self.onnx_model_path.parent, save_dir) return Path(save_dir) def get_calibration_dataset(self, dataset_name: str, num_samples: int=100, dataset_config_name: Optional[str]=None, dataset_split: Optional[str]=None, preprocess_function: Optional[Callable]=None, preprocess_batch: bool=True, seed: int=2016, use_auth_token: Optional[Union[bool, str]]=None, token: Optional[Union[bool, str]]=None) -> Dataset: if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.', FutureWarning) if token is not None: raise ValueError('You cannot use both `use_auth_token` and `token` arguments at the same time.') token = use_auth_token if dataset_name is None: raise ValueError('ORTQuantizer: Static quantization calibration step requires a dataset_name if no calib_dataset is provided.') calib_dataset = load_dataset(dataset_name, name=dataset_config_name, split=dataset_split, token=token) if num_samples is not None: num_samples = min(num_samples, len(calib_dataset)) calib_dataset = calib_dataset.shuffle(seed=seed).select(range(num_samples)) if preprocess_function is not None: processed_calib_dataset = calib_dataset.map(preprocess_function, batched=preprocess_batch) else: processed_calib_dataset = calib_dataset return self.clean_calibration_dataset(processed_calib_dataset) def clean_calibration_dataset(self, dataset: Dataset) -> Dataset: model = onnx.load(self.onnx_model_path) model_inputs = {input.name for input in model.graph.input} ignored_columns = list(set(dataset.column_names) - model_inputs) return dataset.remove_columns(ignored_columns) # File: optimum-main/optimum/onnxruntime/runs/__init__.py import copy import os from pathlib import Path from transformers import pipeline as _transformers_pipeline from transformers.onnx import FeaturesManager from transformers.onnx.utils import get_preprocessor from onnxruntime.quantization import QuantFormat, QuantizationMode, QuantType from ...pipelines import ORT_SUPPORTED_TASKS from ...pipelines import pipeline as _optimum_pipeline from ...runs_base import Run, TimeBenchmark, get_autoclass_name, task_processing_map from .. 
import ORTQuantizer from ..configuration import QuantizationConfig from ..modeling_ort import ORTModel from ..preprocessors import QuantizationPreprocessor from .calibrator import OnnxRuntimeCalibrator from .utils import task_ortmodel_map class OnnxRuntimeRun(Run): def __init__(self, run_config): super().__init__(run_config) qconfig = QuantizationConfig(is_static=self.static_quantization, format=QuantFormat.QDQ if self.static_quantization else QuantFormat.QOperator, mode=QuantizationMode.QLinearOps if self.static_quantization else QuantizationMode.IntegerOps, activations_dtype=QuantType.QInt8 if self.static_quantization else QuantType.QUInt8, weights_dtype=QuantType.QInt8, per_channel=run_config['per_channel'], reduce_range=False, operators_to_quantize=run_config['operators_to_quantize']) onnx_model = ORT_SUPPORTED_TASKS[self.task]['class'][0].from_pretrained(run_config['model_name_or_path'], export=True) trfs_model = FeaturesManager.get_model_from_feature(onnx_model.export_feature, run_config['model_name_or_path']) quantizer = ORTQuantizer.from_pretrained(onnx_model) self.preprocessor = get_preprocessor(run_config['model_name_or_path']) self.batch_sizes = run_config['batch_sizes'] self.input_lengths = run_config['input_lengths'] self.time_benchmark_args = run_config['time_benchmark_args'] self.model_path = 'model.onnx' self.quantized_model_path = 'model_quantized.onnx' processing_class = task_processing_map[self.task] self.task_processor = processing_class(dataset_path=run_config['dataset']['path'], dataset_name=run_config['dataset']['name'], calibration_split=run_config['dataset']['calibration_split'], eval_split=run_config['dataset']['eval_split'], preprocessor=self.preprocessor, data_keys=run_config['dataset']['data_keys'], ref_keys=run_config['dataset']['ref_keys'], task_args=run_config['task_args'], static_quantization=self.static_quantization, num_calibration_samples=run_config['calibration']['num_calibration_samples'] if self.static_quantization else None, config=trfs_model.config, max_eval_samples=run_config['max_eval_samples']) self.metric_names = run_config['metrics'] self.load_datasets() quantization_preprocessor = QuantizationPreprocessor() ranges = None if self.static_quantization: calibration_dataset = self.get_calibration_dataset() calibrator = OnnxRuntimeCalibrator(calibration_dataset, quantizer, self.model_path, qconfig, calibration_params=run_config['calibration'], node_exclusion=run_config['node_exclusion']) (ranges, quantization_preprocessor) = calibrator.fit() quantizer.quantize(save_dir='./', calibration_tensors_range=ranges, quantization_config=qconfig, preprocessor=quantization_preprocessor) ort_session = ORTModel.load_model(str(Path('./') / self.quantized_model_path)) self.ort_model = task_ortmodel_map[self.task](ort_session, config=trfs_model.config) model_class = FeaturesManager.get_model_class_for_feature(get_autoclass_name(self.task)) self.torch_model = model_class.from_pretrained(run_config['model_name_or_path']) self.return_body['model_type'] = self.torch_model.config.model_type def _launch_time(self, trial): batch_size = trial.suggest_categorical('batch_size', self.batch_sizes) input_length = trial.suggest_categorical('input_length', self.input_lengths) model_input_names = set(self.preprocessor.model_input_names) print('Running ONNX Runtime time benchmark.') ort_benchmark = TimeBenchmark(self.ort_model, input_length=input_length, batch_size=batch_size, model_input_names=model_input_names, warmup_runs=self.time_benchmark_args['warmup_runs'], 
duration=self.time_benchmark_args['duration']) optimized_time_metrics = ort_benchmark.execute() print('Running Pytorch time benchmark.') torch_benchmark = TimeBenchmark(self.torch_model, input_length=input_length, batch_size=batch_size, model_input_names=model_input_names, warmup_runs=self.time_benchmark_args['warmup_runs'], duration=self.time_benchmark_args['duration']) baseline_time_metrics = torch_benchmark.execute() time_evaluation = {'batch_size': batch_size, 'input_length': input_length, 'baseline': baseline_time_metrics, 'optimized': optimized_time_metrics} self.return_body['evaluation']['time'].append(time_evaluation) return (0, 0) def launch_eval(self): kwargs = self.task_processor.get_pipeline_kwargs() ort_pipeline = _optimum_pipeline(task=self.task, model=self.ort_model, tokenizer=self.preprocessor, feature_extractor=self.preprocessor, accelerator='ort', **kwargs) transformers_pipeline = _transformers_pipeline(task=self.task, model=self.torch_model, tokenizer=self.preprocessor, feature_extractor=self.preprocessor, **kwargs) eval_dataset = self.get_eval_dataset() print('Running evaluation...') baseline_metrics_dict = self.task_processor.run_evaluation(eval_dataset, transformers_pipeline, self.metric_names) optimized_metrics_dict = self.task_processor.run_evaluation(eval_dataset, ort_pipeline, self.metric_names) baseline_metrics_dict.pop('total_time_in_seconds', None) baseline_metrics_dict.pop('samples_per_second', None) baseline_metrics_dict.pop('latency_in_seconds', None) optimized_metrics_dict.pop('total_time_in_seconds', None) optimized_metrics_dict.pop('samples_per_second', None) optimized_metrics_dict.pop('latency_in_seconds', None) self.return_body['evaluation']['others']['baseline'].update(baseline_metrics_dict) self.return_body['evaluation']['others']['optimized'].update(optimized_metrics_dict) def finalize(self): if os.path.isfile(self.quantized_model_path): os.remove(self.quantized_model_path) if os.path.isfile(self.model_path): os.remove(self.model_path) # File: optimum-main/optimum/onnxruntime/runs/calibrator.py from typing import Dict, List from datasets import Dataset from ...runs_base import Calibrator from .. 
import ORTQuantizer from ..configuration import AutoCalibrationConfig, QuantizationConfig from ..preprocessors import QuantizationPreprocessor from ..preprocessors.passes import ExcludeGeLUNodes, ExcludeLayerNormNodes, ExcludeNodeAfter, ExcludeNodeFollowedBy class OnnxRuntimeCalibrator(Calibrator): def __init__(self, calibration_dataset: Dataset, quantizer: ORTQuantizer, model_path: str, qconfig: QuantizationConfig, calibration_params: Dict, node_exclusion: List[str]): super().__init__(calibration_dataset=calibration_dataset, quantizer=quantizer, model_path=model_path, qconfig=qconfig, calibration_params=calibration_params, node_exclusion=node_exclusion) self.calibration_dataset = self.quantizer.clean_calibration_dataset(calibration_dataset) def fit(self): quantization_preprocessor = QuantizationPreprocessor() if 'layernorm' in self.node_exclusion: quantization_preprocessor.register_pass(ExcludeLayerNormNodes()) if 'gelu' in self.node_exclusion: quantization_preprocessor.register_pass(ExcludeGeLUNodes()) if 'residual' in self.node_exclusion: quantization_preprocessor.register_pass(ExcludeNodeAfter('Add', 'Add')) if 'gather' in self.node_exclusion: quantization_preprocessor.register_pass(ExcludeNodeAfter('Gather', 'Add')) if 'softmax' in self.node_exclusion: quantization_preprocessor.register_pass(ExcludeNodeFollowedBy('Add', 'Softmax')) if self.calibration_params['method'] == 'entropy': calibration_config = AutoCalibrationConfig.entropy(self.calibration_dataset) elif self.calibration_params['method'] == 'percentile': calibration_config = AutoCalibrationConfig.percentiles(self.calibration_dataset, percentile=self.calibration_params['calibration_histogram_percentile']) else: calibration_config = AutoCalibrationConfig.minmax(self.calibration_dataset, self.calibration_params['calibration_moving_average'], self.calibration_params['calibration_moving_average_constant']) num_calibration_shards = 4 if not 1 <= num_calibration_shards <= len(self.calibration_dataset): raise ValueError(f'Invalid value of number of shards {num_calibration_shards} chosen to split the calibration dataset, should be higher than 0 and lower or equal to the number of samples {len(self.calibration_dataset)}.') for i in range(num_calibration_shards): shard = self.calibration_dataset.shard(num_calibration_shards, i) self.quantizer.partial_fit(dataset=shard, calibration_config=calibration_config, onnx_model_path=self.model_path, operators_to_quantize=self.qconfig.operators_to_quantize, batch_size=8, use_external_data_format=False) ranges = self.quantizer.compute_ranges() return (ranges, quantization_preprocessor) # File: optimum-main/optimum/onnxruntime/runs/utils.py from ..modeling_decoder import ORTModelForCausalLM from ..modeling_ort import ORTModelForFeatureExtraction, ORTModelForImageClassification, ORTModelForQuestionAnswering, ORTModelForSequenceClassification, ORTModelForTokenClassification task_ortmodel_map = {'text-generation': ORTModelForCausalLM, 'feature-extraction': ORTModelForFeatureExtraction, 'image-classification': ORTModelForImageClassification, 'question-answering': ORTModelForQuestionAnswering, 'text-classification': ORTModelForSequenceClassification, 'token-classification': ORTModelForTokenClassification} # File: optimum-main/optimum/onnxruntime/subpackage/commands/base.py """""" from optimum.commands import BaseOptimumCLICommand, CommandInfo, optimum_cli_subcommand from .optimize import ONNXRuntimeOptimizeCommand from .quantize import ONNXRuntimeQuantizeCommand @optimum_cli_subcommand() class 
ONNXRuntimeCommand(BaseOptimumCLICommand): COMMAND = CommandInfo(name='onnxruntime', help='ONNX Runtime optimize and quantize utilities.') SUBCOMMANDS = (CommandInfo(name='optimize', help='Optimize ONNX models.', subcommand_class=ONNXRuntimeOptimizeCommand), CommandInfo(name='quantize', help='Dynamic quantization for ONNX models.', subcommand_class=ONNXRuntimeQuantizeCommand)) # File: optimum-main/optimum/onnxruntime/subpackage/commands/optimize.py """""" from pathlib import Path from typing import TYPE_CHECKING from optimum.commands.base import BaseOptimumCLICommand if TYPE_CHECKING: from argparse import ArgumentParser def parse_args_onnxruntime_optimize(parser: 'ArgumentParser'): required_group = parser.add_argument_group('Required arguments') required_group.add_argument('--onnx_model', type=Path, required=True, help='Path to the repository where the ONNX models to optimize are located.') required_group.add_argument('-o', '--output', type=Path, required=True, help='Path to the directory where to store generated ONNX model.') level_group = parser.add_mutually_exclusive_group(required=True) level_group.add_argument('-O1', action='store_true', help='Basic general optimizations (see: https://huggingface.co/docs/optimum/onnxruntime/usage_guides/optimization for more details).') level_group.add_argument('-O2', action='store_true', help='Basic and extended general optimizations, transformers-specific fusions (see: https://huggingface.co/docs/optimum/onnxruntime/usage_guides/optimization for more details).') level_group.add_argument('-O3', action='store_true', help='Same as O2 with Gelu approximation (see: https://huggingface.co/docs/optimum/onnxruntime/usage_guides/optimization for more details).') level_group.add_argument('-O4', action='store_true', help='Same as O3 with mixed precision (see: https://huggingface.co/docs/optimum/onnxruntime/usage_guides/optimization for more details).') level_group.add_argument('-c', '--config', type=Path, help='`ORTConfig` file to use to optimize the model.') class ONNXRuntimeOptimizeCommand(BaseOptimumCLICommand): @staticmethod def parse_args(parser: 'ArgumentParser'): return parse_args_onnxruntime_optimize(parser) def run(self): from ...configuration import AutoOptimizationConfig, ORTConfig from ...optimization import ORTOptimizer if self.args.output == self.args.onnx_model: raise ValueError('The output directory must be different than the directory hosting the ONNX model.') save_dir = self.args.output file_names = [model.name for model in self.args.onnx_model.glob('*.onnx')] optimizer = ORTOptimizer.from_pretrained(self.args.onnx_model, file_names) if self.args.config: optimization_config = ORTConfig.from_pretrained(self.args.config).optimization elif self.args.O1: optimization_config = AutoOptimizationConfig.O1() elif self.args.O2: optimization_config = AutoOptimizationConfig.O2() elif self.args.O3: optimization_config = AutoOptimizationConfig.O3() elif self.args.O4: optimization_config = AutoOptimizationConfig.O4() else: raise ValueError('Either -O1, -O2, -O3, -O4 or -c must be specified.') optimizer.optimize(save_dir=save_dir, optimization_config=optimization_config) # File: optimum-main/optimum/onnxruntime/subpackage/commands/quantize.py """""" from pathlib import Path from typing import TYPE_CHECKING from optimum.commands import BaseOptimumCLICommand if TYPE_CHECKING: from argparse import ArgumentParser def parse_args_onnxruntime_quantize(parser: 'ArgumentParser'): required_group = parser.add_argument_group('Required
arguments') required_group.add_argument('--onnx_model', type=Path, required=True, help='Path to the repository where the ONNX models to quantize are located.') required_group.add_argument('-o', '--output', type=Path, required=True, help='Path to the directory where to store generated ONNX model.') optional_group = parser.add_argument_group('Optional arguments') optional_group.add_argument('--per_channel', action='store_true', help='Compute the quantization parameters on a per-channel basis.') level_group = parser.add_mutually_exclusive_group(required=True) level_group.add_argument('--arm64', action='store_true', help='Quantization for the ARM64 architecture.') level_group.add_argument('--avx2', action='store_true', help='Quantization with AVX-2 instructions.') level_group.add_argument('--avx512', action='store_true', help='Quantization with AVX-512 instructions.') level_group.add_argument('--avx512_vnni', action='store_true', help='Quantization with AVX-512 and VNNI instructions.') level_group.add_argument('--tensorrt', action='store_true', help='Quantization for NVIDIA TensorRT optimizer.') level_group.add_argument('-c', '--config', type=Path, help='`ORTConfig` file to use to optimize the model.') class ONNXRuntimeQuantizeCommand(BaseOptimumCLICommand): @staticmethod def parse_args(parser: 'ArgumentParser'): return parse_args_onnxruntime_quantize(parser) def run(self): from ...configuration import AutoQuantizationConfig, ORTConfig from ...quantization import ORTQuantizer if self.args.output == self.args.onnx_model: raise ValueError('The output directory must be different than the directory hosting the ONNX model.') save_dir = self.args.output quantizers = [] use_external_data_format = False quantizers = [ORTQuantizer.from_pretrained(self.args.onnx_model, file_name=model.name) for model in self.args.onnx_model.glob('*.onnx')] if self.args.arm64: qconfig = AutoQuantizationConfig.arm64(is_static=False, per_channel=self.args.per_channel) elif self.args.avx2: qconfig = AutoQuantizationConfig.avx2(is_static=False, per_channel=self.args.per_channel) elif self.args.avx512: qconfig = AutoQuantizationConfig.avx512(is_static=False, per_channel=self.args.per_channel) elif self.args.avx512_vnni: qconfig = AutoQuantizationConfig.avx512_vnni(is_static=False, per_channel=self.args.per_channel) elif self.args.tensorrt: raise ValueError('TensorRT quantization relies on static quantization that requires calibration, which is currently not supported through optimum-cli. 
Please adapt Optimum static quantization examples to run static quantization for TensorRT: https://github.com/huggingface/optimum/tree/main/examples/onnxruntime/quantization') else: config = ORTConfig.from_pretrained(self.args.config) qconfig = config.quantization use_external_data_format = config.use_external_data_format for q in quantizers: q.quantize(save_dir=save_dir, quantization_config=qconfig, use_external_data_format=use_external_data_format) # File: optimum-main/optimum/onnxruntime/trainer.py """""" import functools import math import os import shutil import sys import time import types import warnings from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union from transformers.integrations import hp_params from transformers.utils import is_accelerate_available from packaging import version if is_accelerate_available(): from accelerate import __version__ as accelerate_version from accelerate.utils import DistributedDataParallelKwargs if version.parse(accelerate_version) >= version.parse('0.16'): from accelerate import skip_first_batches else: skip_first_batches = None from accelerate.utils import DistributedType else: raise ImportError('The package `accelerate` is required to use the ORTTrainer. Please install it following https://huggingface.co/docs/accelerate/basic_tutorials/install.') import huggingface_hub.utils as hf_hub_utils import torch import torch.distributed as dist from torch import nn from torch.utils.data import Dataset, RandomSampler from transformers.data.data_collator import DataCollator from transformers.debug_utils import DebugOption, DebugUnderflowOverflow from transformers.modeling_utils import PreTrainedModel, unwrap_model from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer import Trainer from transformers.trainer_callback import TrainerCallback, TrainerState from transformers.trainer_pt_utils import get_model_param_count, get_module_class_from_name, get_parameter_names from transformers.trainer_utils import EvalPrediction, HPSearchBackend, TrainOutput, enable_full_determinism, find_executable_batch_size, get_last_checkpoint, has_length, set_seed, speed_metrics from transformers.training_args import ParallelMode from transformers.utils import is_apex_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled from ..utils import logging from ..utils.import_utils import check_if_transformers_greater from .training_args import ORTOptimizerNames, ORTTrainingArguments from .utils import is_onnxruntime_training_available if is_apex_available(): from apex import amp if check_if_transformers_greater('4.33'): from transformers.integrations.deepspeed import deepspeed_init, deepspeed_load_checkpoint, is_deepspeed_zero3_enabled else: from transformers.deepspeed import deepspeed_init, deepspeed_load_checkpoint, is_deepspeed_zero3_enabled if check_if_transformers_greater('4.39'): from transformers.utils import is_torch_xla_available if is_torch_xla_available(): import torch_xla.core.xla_model as xm else: from transformers.utils import is_torch_tpu_available if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm if TYPE_CHECKING: import optuna TRAINER_STATE_NAME = 'trainer_state.json' logger = logging.get_logger(__name__) class ModuleWithLoss(nn.Module): def __init__(self, model, args, label_smoother): super().__init__() self._original_model = model self.args = args self.label_smoother = label_smoother def forward(self, inputs: Dict[str, Union[torch.Tensor, Any]], 
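# --- Editor's illustrative sketch (not part of the original sources) ---------------------
# A rough Python equivalent of what the two CLI subcommands above implement, i.e.
# `optimum-cli onnxruntime optimize -O2 ...` and `optimum-cli onnxruntime quantize --avx2 ...`.
# Directory and file names are placeholders.
from optimum.onnxruntime import ORTOptimizer, ORTQuantizer
from optimum.onnxruntime.configuration import AutoOptimizationConfig, AutoQuantizationConfig

# Graph optimization at level O2 (basic + extended + transformers-specific fusions),
# mirroring `ONNXRuntimeOptimizeCommand.run`.
optimizer = ORTOptimizer.from_pretrained("path/to/onnx-model-dir", file_names=["model.onnx"])
optimizer.optimize(save_dir="optimized_model", optimization_config=AutoOptimizationConfig.O2())

# Dynamic quantization targeting AVX2, mirroring `ONNXRuntimeQuantizeCommand.run`.
quantizer = ORTQuantizer.from_pretrained("path/to/onnx-model-dir", file_name="model.onnx")
qconfig = AutoQuantizationConfig.avx2(is_static=False, per_channel=False)
quantizer.quantize(save_dir="quantized_model", quantization_config=qconfig)
# -----------------------------------------------------------------------------------------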
return_outputs): return self.compute_model_plus_loss_internal(self._original_model, inputs, return_outputs) @property def module(self): return self._original_model.module @property def config(self): return self._original_model.config class ORTTrainer(Trainer): def __init__(self, model: Union[PreTrainedModel, nn.Module]=None, args: ORTTrainingArguments=None, data_collator: Optional[DataCollator]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, model_init: Optional[Callable[[], PreTrainedModel]]=None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None): super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if args.use_module_with_loss: self._training_model = self.create_model_with_loss() self.model = model if self.args.local_rank: torch.cuda.set_device(self.args.local_rank) def create_model_with_loss(self): model_with_loss = ModuleWithLoss(self.model, self.args, self.label_smoother) model_with_loss.compute_model_plus_loss_internal = types.MethodType(Trainer.compute_loss, model_with_loss) return model_with_loss def _set_signature_columns_if_needed(self): if self._signature_columns is None: import inspect if isinstance(self.model, ModuleWithLoss): signature = inspect.signature(self.model._original_model.forward) else: signature = inspect.signature(self.model.forward) self._signature_columns = list(signature.parameters.keys()) self._signature_columns += list(set(['label', 'label_ids'] + self.label_names)) def compute_loss(self, model_with_loss, inputs, return_outputs=False): if isinstance(self.model, ModuleWithLoss): dict_inputs = dict(inputs.items()) return model_with_loss(dict_inputs, return_outputs) else: return super().compute_loss(model_with_loss, inputs, return_outputs) def train(self, resume_from_checkpoint: Optional[Union[str, bool]]=None, trial: Union['optuna.Trial', Dict[str, Any]]=None, ignore_keys_for_eval: Optional[List[str]]=None, **kwargs): if not is_onnxruntime_training_available(): raise ImportError('You need to install `onnxruntime-training` to use `ORTTrainer` for training. Check out https://huggingface.co/docs/optimum/onnxruntime/usage_guides/trainer#install-onnx-runtime.') if self.args.use_module_with_loss: self.model = self._training_model if resume_from_checkpoint is False: resume_from_checkpoint = None self._memory_tracker.start() args = self.args self.is_in_train = True if (args.fp16_full_eval or args.bf16_full_eval) and (not args.do_train): self._move_model_to_device(self.model, args.device) if 'model_path' in kwargs: resume_from_checkpoint = kwargs.pop('model_path') warnings.warn('`model_path` is deprecated and will be removed in a future version. 
Use `resume_from_checkpoint` instead.', FutureWarning) if len(kwargs) > 0: raise TypeError(f"train() received unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.") self._hp_search_setup(trial) self._train_batch_size = self.args.train_batch_size model_reloaded = False if self.model_init is not None: enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) self.model = self.call_model_init(trial) model_reloaded = True (self.optimizer, self.lr_scheduler) = (None, None) if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint: resume_from_checkpoint = get_last_checkpoint(args.output_dir) if resume_from_checkpoint is None: raise ValueError(f'No valid checkpoint found in output directory ({args.output_dir})') if resume_from_checkpoint is not None and (not is_sagemaker_mp_enabled()) and (not self.is_deepspeed_enabled) and (not self.is_fsdp_enabled): self._load_from_checkpoint(resume_from_checkpoint) if model_reloaded: if self.place_model_on_device: self._move_model_to_device(self.model, args.device) self.model_wrapped = self.model inner_training_loop = find_executable_batch_size(self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size) if args.push_to_hub: try: hf_hub_utils.disable_progress_bars() return inner_training_loop(args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval) finally: hf_hub_utils.enable_progress_bars() else: return inner_training_loop(args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval) def _inner_training_loop(self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None): from torch_ort import ORTModule self.accelerator.free_memory() self._train_batch_size = batch_size logger.debug(f'Currently training with a batch size of: {self._train_batch_size}') train_dataloader = self.get_train_dataloader() total_train_batch_size = self._train_batch_size * args.gradient_accumulation_steps * args.world_size len_dataloader = None if has_length(train_dataloader): len_dataloader = len(train_dataloader) num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) num_examples = self.num_examples(train_dataloader) if args.max_steps > 0: max_steps = args.max_steps num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(args.max_steps % num_update_steps_per_epoch > 0) num_train_samples = args.max_steps * total_train_batch_size else: max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs elif args.max_steps > 0: max_steps = args.max_steps num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size else: raise ValueError(f'args.max_steps must be set to a positive value if dataloader does not have a length, was {args.max_steps}') if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: if self.args.n_gpu > 1: raise ValueError('Currently --debug underflow_overflow is not supported under DP. 
Please use DDP (torch.distributed.launch).') else: debug_overflow = DebugUnderflowOverflow(self.model) delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled logger.info('Wrap ORTModule for ONNX Runtime training.') if self.args.save_onnx: from torch_ort import DebugOptions model = ORTModule(self.model, DebugOptions(save_onnx=self.args.save_onnx, onnx_prefix=self.args.onnx_prefix)) else: model = ORTModule(self.model) self.model_wrapped = model self.model = model if self._created_lr_scheduler: self.lr_scheduler = None self._created_lr_scheduler = False if self.is_deepspeed_enabled: if is_deepspeed_zero3_enabled(): raise NotImplementedError('`ORTTrainer` does not support ZeRO stage 3 for the moment. Please use DeepSpeed stage 1 or 2 instead.') if args.bf16: warnings.warn("ONNX Runtime doesn't support BF16 when executing some operators. The execution will fail if there are any op which doesn't support BF16 in the IR.", RuntimeWarning) self.model = model (self.optimizer, self.lr_scheduler) = deepspeed_init(self, num_training_steps=max_steps) if not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState() self.state.is_hyper_param_search = trial is not None if args.logging_steps is not None: if args.logging_steps < 1: self.state.logging_steps = math.ceil(max_steps * args.logging_steps) else: self.state.logging_steps = args.logging_steps if args.eval_steps is not None: if args.eval_steps < 1: self.state.eval_steps = math.ceil(max_steps * args.eval_steps) else: self.state.eval_steps = args.eval_steps if args.save_steps is not None: if args.save_steps < 1: self.state.save_steps = math.ceil(max_steps * args.save_steps) else: self.state.save_steps = args.save_steps if args.gradient_checkpointing: self.model.gradient_checkpointing_enable() model = self._wrap_model(self.model_wrapped) if (is_sagemaker_mp_enabled() or self.is_fsdp_enabled) and resume_from_checkpoint is not None: self._load_from_checkpoint(resume_from_checkpoint, model) use_accelerator_prepare = True if model is self.model else False if delay_optimizer_creation: if use_accelerator_prepare: self.model = self.accelerator.prepare(self.model) self.create_optimizer_and_scheduler(num_training_steps=max_steps) if use_accelerator_prepare: self.model.train() self.accelerator.distributed_type == DistributedType.DEEPSPEED if hasattr(self.lr_scheduler, 'step'): if self.use_apex: model = self.accelerator.prepare(self.model) else: (model, self.optimizer) = self.accelerator.prepare(self.model, self.optimizer) else: (model, self.optimizer, self.lr_scheduler) = self.accelerator.prepare(self.model, self.optimizer, self.lr_scheduler) self.model = unwrap_model(model) if self.is_deepspeed_enabled and args.fp16: from onnxruntime.training.optim.fp16_optimizer import FP16_Optimizer self.optimizer = FP16_Optimizer(self.optimizer) if self.is_fsdp_enabled: self.model = model if model is not self.model: self.model_wrapped = model if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped if resume_from_checkpoint is not None and self.is_deepspeed_enabled: deepspeed_load_checkpoint(self.model_wrapped, resume_from_checkpoint) self._load_optimizer_and_scheduler(resume_from_checkpoint) logger.info('***** Running training *****') logger.info(f' Num examples = {num_examples:,}') logger.info(f' Num Epochs = {num_train_epochs:,}') logger.info(f' Instantaneous batch size per device = {self.args.per_device_train_batch_size:,}') if 
self.args.per_device_train_batch_size != self._train_batch_size: logger.info(f' Training with DataParallel so batch size has been adjusted to: {self._train_batch_size:,}') logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size:,}') logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}') logger.info(f' Total optimization steps = {max_steps:,}') logger.info(f' Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}') self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 steps_trained_progress_bar = None if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)): self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) epochs_trained = self.state.global_step // num_update_steps_per_epoch if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % num_update_steps_per_epoch steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info(' Continuing training from checkpoint, will skip to saved global_step') logger.info(f' Continuing training from epoch {epochs_trained}') logger.info(f' Continuing training from global step {self.state.global_step}') if not args.ignore_data_skip: logger.info(f' Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} batches in the first epoch.') self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader if self.hp_name is not None and self._trial is not None: self.state.trial_name = self.hp_name(self._trial) if trial is not None: assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial self.state.trial_params = hp_params(assignments) else: self.state.trial_params = None self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() tr_loss = torch.tensor(0.0).to(args.device) self._total_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step model.zero_grad() self.control = self.callback_handler.on_train_begin(args, self.state, self.control) def get_dataloader_sampler(dataloader): if hasattr(dataloader, 'batch_sampler') and dataloader.batch_sampler is not None: return get_dataloader_sampler(dataloader.batch_sampler) elif hasattr(dataloader, 'sampler'): return dataloader.sampler if not args.ignore_data_skip: for epoch in range(epochs_trained): sampler = get_dataloader_sampler(train_dataloader) is_random_sampler = isinstance(sampler, RandomSampler) if not is_random_sampler: for _ in train_dataloader: break else: sampler = sampler if sampler is not None else [] _ = list(sampler) total_batched_samples = 0 for epoch in range(epochs_trained, num_train_epochs): epoch_iterator = train_dataloader if args.past_index >= 0: self._past = None steps_in_epoch = len(epoch_iterator) if len_dataloader is not None else args.max_steps * args.gradient_accumulation_steps self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) if epoch == epochs_trained and resume_from_checkpoint is not None and (steps_trained_in_current_epoch == 0): 
self._load_rng_state(resume_from_checkpoint) rng_to_sync = False steps_skipped = 0 if steps_trained_in_current_epoch > 0: epoch_iterator = skip_first_batches(epoch_iterator, steps_trained_in_current_epoch) steps_skipped = steps_trained_in_current_epoch steps_trained_in_current_epoch = 0 rng_to_sync = True step = -1 for (step, inputs) in enumerate(epoch_iterator): total_batched_samples += 1 if rng_to_sync: self._load_rng_state(resume_from_checkpoint) rng_to_sync = False if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) with self.accelerator.accumulate(model): tr_loss_step = self.training_step(model, inputs) if args.logging_nan_inf_filter and (not is_torch_tpu_available()) and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)): tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged) else: tr_loss += tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) is_last_step_and_steps_less_than_grad_acc = steps_in_epoch <= args.gradient_accumulation_steps and step + 1 == steps_in_epoch if total_batched_samples % args.gradient_accumulation_steps == 0 or is_last_step_and_steps_less_than_grad_acc: if is_last_step_and_steps_less_than_grad_acc or version.parse(accelerate_version) <= version.parse('0.20.3'): self.accelerator.gradient_state._set_sync_gradients(True) if args.max_grad_norm is not None and args.max_grad_norm > 0: if is_sagemaker_mp_enabled() and args.fp16: _grad_norm = self.optimizer.clip_master_grads(args.max_grad_norm) elif hasattr(self.optimizer, 'clip_grad_norm'): _grad_norm = self.optimizer.clip_grad_norm(args.max_grad_norm) elif hasattr(model, 'clip_grad_norm_'): _grad_norm = model.clip_grad_norm_(args.max_grad_norm) else: _grad_norm = self.accelerator.clip_grad_norm_(model.parameters(), args.max_grad_norm) if is_accelerate_available() and self.accelerator.distributed_type == DistributedType.DEEPSPEED: grad_norm = model.get_global_grad_norm() else: grad_norm = _grad_norm.item() if _grad_norm is not None else None self.optimizer.step() optimizer_was_run = not self.accelerator.optimizer_step_was_skipped if optimizer_was_run: if not isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): self.lr_scheduler.step() model.zero_grad() grad_norm: Optional[float] = None self.state.global_step += 1 self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval) else: self.control = self.callback_handler.on_substep_end(args, self.state, self.control) if self.control.should_epoch_stop or self.control.should_training_stop: break if step < 0: logger.warning(f"There seems to be not a single sample in your train dataloader, stopping training at step {self.state.global_step}! 
This is expected if you're using an IterableDataset and set num_steps ({max_steps}) higher than the number of available samples.") self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: logger.warning('You enabled PyTorch/XLA debug metrics which is not supported by ONNX Runtime. Check your training configuration if this is unexpected.') if self.control.should_training_stop: break if args.past_index and hasattr(self, '_past'): delattr(self, '_past') logger.info('\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n') if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: if args.parallel_mode == ParallelMode.DISTRIBUTED: dist.barrier() self._load_best_model() self._total_loss_scalar += tr_loss.item() train_loss = self._total_loss_scalar / self.state.global_step metrics = speed_metrics('train', start_time, num_samples=num_train_samples, num_steps=self.state.max_steps) self.store_flos() metrics['total_flos'] = self.state.total_flos metrics['train_loss'] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) if self.args.should_save and self.state.best_model_checkpoint is not None and (self.args.save_total_limit == 1): for checkpoint in checkpoints_sorted: if not os.path.samefile(checkpoint, self.state.best_model_checkpoint): logger.info(f'Deleting older checkpoint [{checkpoint}] due to args.save_total_limit') shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) self._finish_current_push() return TrainOutput(self.state.global_step, train_loss, metrics) def _wrap_model(self, model, training=True, dataloader=None): if self.args.use_ipex: dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32 model = self.ipex_optimize_model(model, training, dtype=dtype) if is_sagemaker_mp_enabled(): raise NotImplementedError("Sagemaker's distributed data parallel features are not supported by `ORTTrainer`.") if unwrap_model(model) is not model: from torch_ort import ORTModule if not isinstance(model, ORTModule): return model if self.use_apex and training: (model, self.optimizer) = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) if self.args.fp16: from onnxruntime.training.optim.fp16_optimizer import FP16_Optimizer self.optimizer = FP16_Optimizer(self.optimizer) if self.args.n_gpu > 1 and (not getattr(model, 'is_loaded_in_8bit', False)): model = nn.DataParallel(model) if self.args.jit_mode_eval: start_time = time.time() model = self.torch_jit_model_eval(model, dataloader, training) self.jit_compilation_time = round(time.time() - start_time, 4) if not training: return model if self.is_fsdp_xla_enabled: try: from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP from torch_xla.distributed.fsdp import checkpoint_module from torch_xla.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy except ImportError: raise ImportError('Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.') auto_wrap_policy = None auto_wrapper_callable = None default_transformer_cls_names_to_wrap = getattr(model, '_no_split_modules', None) 
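# --- Editor's illustrative sketch (not part of the original sources) ---------------------
# Minimal `ORTTrainer` setup matching the constructor defined above. The model name, toy
# dataset and output directory are placeholders; running `train()` requires the
# `onnxruntime-training` package (see the ImportError raised in `ORTTrainer.train`) and is
# typically done on a CUDA device, since the loop wraps the model in `ORTModule`.
from datasets import Dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from optimum.onnxruntime import ORTTrainer, ORTTrainingArguments

model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

# Tiny placeholder dataset, tokenized into the model's expected inputs.
train_dataset = Dataset.from_dict({"text": ["a great movie", "a terrible movie"], "label": [1, 0]})
train_dataset = train_dataset.map(
    lambda ex: tokenizer(ex["text"], truncation=True, padding="max_length", max_length=32),
    remove_columns=["text"],
)

args = ORTTrainingArguments(
    output_dir="ort_trainer_output",
    per_device_train_batch_size=2,
    num_train_epochs=1,
    optim="adamw_ort_fused",     # resolved through `ORTOptimizerNames` / ORT `FusedAdam`
    use_module_with_loss=True,   # wraps the model in the `ModuleWithLoss` class defined above
)

trainer = ORTTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
trainer.train()
# -----------------------------------------------------------------------------------------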
fsdp_transformer_layer_cls_to_wrap = self.args.fsdp_config.get('transformer_layer_cls_to_wrap', default_transformer_cls_names_to_wrap) if self.args.fsdp_config['min_num_params'] > 0: auto_wrap_policy = functools.partial(size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config['min_num_params']) elif fsdp_transformer_layer_cls_to_wrap is not None: transformer_cls_to_wrap = set() for layer_class in fsdp_transformer_layer_cls_to_wrap: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise Exception('Could not find the transformer layer class to wrap in the model.') else: transformer_cls_to_wrap.add(transformer_cls) auto_wrap_policy = functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=transformer_cls_to_wrap) fsdp_kwargs = self.args.xla_fsdp_config if self.args.fsdp_config['xla_fsdp_grad_ckpt']: def auto_wrapper_callable(m, *args, **kwargs): return FSDP(checkpoint_module(m), *args, **kwargs) self.model = model = FSDP(model, auto_wrap_policy=auto_wrap_policy, auto_wrapper_callable=auto_wrapper_callable, **fsdp_kwargs) def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}): loss = optimizer.step(**optimizer_args) if barrier: xm.mark_step() return loss xm.optimizer_step = patched_optimizer_step elif is_sagemaker_dp_enabled(): raise NotImplementedError("Sagemaker's distributed data parallel features are not supported by `ORTTrainer` yet.") elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: kwargs = {} if self.args.ddp_find_unused_parameters is not None: kwargs['find_unused_parameters'] = self.args.ddp_find_unused_parameters elif isinstance(model, PreTrainedModel): kwargs['find_unused_parameters'] = not model.is_gradient_checkpointing else: kwargs['find_unused_parameters'] = True if self.args.ddp_bucket_cap_mb is not None: kwargs['bucket_cap_mb'] = self.args.ddp_bucket_cap_mb if self.args.ddp_broadcast_buffers is not None: kwargs['broadcast_buffers'] = self.args.ddp_broadcast_buffers self.accelerator.ddp_handler = DistributedDataParallelKwargs(**kwargs) return model def create_optimizer(self): opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model if self.optimizer is None: decay_parameters = get_parameter_names(opt_model, [nn.LayerNorm]) decay_parameters = [name for name in decay_parameters if 'bias' not in name] optimizer_grouped_parameters = [{'params': [p for (n, p) in opt_model.named_parameters() if n in decay_parameters], 'weight_decay': self.args.weight_decay}, {'params': [p for (n, p) in opt_model.named_parameters() if n not in decay_parameters], 'weight_decay': 0.0}] if self.args.optim in ORTOptimizerNames: (optimizer_cls, optimizer_kwargs) = ORTTrainer.get_ort_optimizer_cls_and_kwargs(self.args) else: (optimizer_cls, optimizer_kwargs) = Trainer.get_optimizer_cls_and_kwargs(self.args) self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) if optimizer_cls.__name__ == 'Adam8bit': import bitsandbytes manager = bitsandbytes.optim.GlobalOptimManager.get_instance() skipped = 0 for module in opt_model.modules(): if isinstance(module, nn.Embedding): skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) logger.info(f'skipped {module}: {skipped / 2 ** 20}M params') manager.register_module_override(module, 'weight', {'optim_bits': 32}) logger.debug(f'bitsandbytes: will optimize {module} in fp32') logger.info(f'skipped: {skipped / 2 ** 20}M params') if is_sagemaker_mp_enabled(): raise NotImplementedError("Sagemaker's distributed data 
parallel features are not supported by `ORTTrainer` yet.") return self.optimizer @staticmethod def get_ort_optimizer_cls_and_kwargs(args: ORTTrainingArguments) -> Tuple[Any, Any]: optimizer_kwargs = {'lr': args.learning_rate} adam_kwargs = {'betas': (args.adam_beta1, args.adam_beta2), 'eps': args.adam_epsilon} if args.optim == ORTOptimizerNames.ADAMW_ORT_FUSED: try: from onnxruntime.training.optim import FusedAdam optimizer_cls = FusedAdam optimizer_kwargs.update(adam_kwargs) except ImportError: raise ImportError('ORTTrainer tried to instantiate ORT FusedAdam but onnxruntime-training is not correctly installed!') else: raise ValueError(f'ORTTrainer cannot instantiate unsupported optimizer: {args.optim}') return (optimizer_cls, optimizer_kwargs) # File: optimum-main/optimum/onnxruntime/trainer_seq2seq.py """""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import Dataset from transformers.trainer_utils import PredictionOutput from transformers.utils import is_accelerate_available, logging from ..utils.import_utils import check_if_transformers_greater from .trainer import ORTTrainer if is_accelerate_available(): pass else: raise ImportError('The package `accelerate` is required to use the ORTTrainer. Please install it following https://huggingface.co/docs/accelerate/basic_tutorials/install.') if check_if_transformers_greater('4.33'): from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled else: from transformers.deepspeed import is_deepspeed_zero3_enabled logger = logging.get_logger(__name__) class ORTSeq2SeqTrainer(ORTTrainer): def evaluate(self, eval_dataset: Optional[Dataset]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval', **gen_kwargs) -> Dict[str, float]: gen_kwargs = gen_kwargs.copy() if gen_kwargs.get('max_length') is None and gen_kwargs.get('max_new_tokens') is None and (self.args.generation_max_length is not None): gen_kwargs['max_length'] = self.args.generation_max_length if gen_kwargs.get('num_beams') is None and self.args.generation_num_beams is not None: gen_kwargs['num_beams'] = self.args.generation_num_beams self._gen_kwargs = gen_kwargs return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) def predict(self, test_dataset: Dataset, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='test', **gen_kwargs) -> 'PredictionOutput': gen_kwargs = gen_kwargs.copy() if gen_kwargs.get('max_length') is None and gen_kwargs.get('max_new_tokens') is None and (self.args.generation_max_length is not None): gen_kwargs['max_length'] = self.args.generation_max_length if gen_kwargs.get('num_beams') is None and self.args.generation_num_beams is not None: gen_kwargs['num_beams'] = self.args.generation_num_beams self._gen_kwargs = gen_kwargs return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) def prediction_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None, **gen_kwargs) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: if not self.args.predict_with_generate or prediction_loss_only: return super().prediction_step(model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys) has_labels = 'labels' in inputs inputs = self._prepare_inputs(inputs) if len(gen_kwargs) == 0 and hasattr(self, '_gen_kwargs'): gen_kwargs = self._gen_kwargs.copy() if 
'num_beams' in gen_kwargs and gen_kwargs['num_beams'] is None: gen_kwargs.pop('num_beams') if 'max_length' in gen_kwargs and gen_kwargs['max_length'] is None: gen_kwargs.pop('max_length') default_synced_gpus = True if is_deepspeed_zero3_enabled() else False gen_kwargs['synced_gpus'] = gen_kwargs['synced_gpus'] if gen_kwargs.get('synced_gpus') is not None else default_synced_gpus generation_inputs = inputs.copy() if 'labels' in generation_inputs and 'decoder_input_ids' in generation_inputs and (generation_inputs['labels'].shape == generation_inputs['decoder_input_ids'].shape): generation_inputs = {k: v for (k, v) in inputs.items() if k != 'decoder_input_ids'} generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs) if self.model.generation_config._from_model_config: self.model.generation_config._from_model_config = False gen_config = self.model.generation_config if generated_tokens.shape[-1] < gen_config.max_length: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length) elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1) with torch.no_grad(): if has_labels: with self.compute_loss_context_manager(): outputs = model(**inputs) if self.label_smoother is not None: loss = self.label_smoother(outputs, inputs['labels']).mean().detach() else: loss = (outputs['loss'] if isinstance(outputs, dict) else outputs[0]).mean().detach() else: loss = None if self.args.prediction_loss_only: return (loss, None, None) if has_labels: labels = inputs['labels'] if labels.shape[-1] < gen_config.max_length: labels = self._pad_tensors_to_max_len(labels, gen_config.max_length) elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1: labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1) else: labels = None return (loss, generated_tokens, labels) def _pad_tensors_to_max_len(self, tensor, max_length): if self.tokenizer is not None and hasattr(self.tokenizer, 'pad_token_id'): pad_token_id = self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id elif self.model.config.pad_token_id is not None: pad_token_id = self.model.config.pad_token_id else: raise ValueError('Pad_token_id must be set in the configuration of the model, in order to pad tensors') padded_tensor = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device) padded_tensor[:, :tensor.shape[-1]] = tensor return padded_tensor # File: optimum-main/optimum/onnxruntime/training_args.py import io import json import os import warnings from dataclasses import dataclass, field from pathlib import Path from typing import Optional from packaging import version from transformers import TrainingArguments from transformers.debug_utils import DebugOption from transformers.trainer_utils import EvaluationStrategy, FSDPOption, HubStrategy, IntervalStrategy, SchedulerType from transformers.training_args import OptimizerNames, default_logdir, logger from transformers.utils import ExplicitEnum, get_full_repo_name, is_accelerate_available, is_safetensors_available, is_torch_available, is_torch_bf16_cpu_available, is_torch_bf16_gpu_available, is_torch_tf32_available, logging from transformers.utils.generic import strtobool from ..utils.import_utils import check_if_transformers_greater if is_torch_available(): import torch if 
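# --- Editor's illustrative sketch (not part of the original sources) ---------------------
# How the generation-related arguments handled by `ORTSeq2SeqTrainer.evaluate/predict`
# above are typically supplied. The model name and toy dataset are placeholders, and the
# preprocessing is only one possible way to build `labels` for a seq2seq model.
from datasets import Dataset
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq
from optimum.onnxruntime import ORTSeq2SeqTrainer, ORTSeq2SeqTrainingArguments

model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
tokenizer = AutoTokenizer.from_pretrained("t5-small")

def preprocess(batch):
    enc = tokenizer(batch["src"], truncation=True, max_length=32)
    enc["labels"] = tokenizer(text_target=batch["tgt"], truncation=True, max_length=32)["input_ids"]
    return enc

raw = Dataset.from_dict({"src": ["translate English to German: hello"], "tgt": ["hallo"]})
tokenized = raw.map(preprocess, batched=True, remove_columns=["src", "tgt"])

args = ORTSeq2SeqTrainingArguments(
    output_dir="ort_seq2seq_output",
    predict_with_generate=True,   # routes evaluation through `prediction_step` + `generate`
    generation_max_length=64,     # picked up as `gen_kwargs["max_length"]` above
    generation_num_beams=4,       # picked up as `gen_kwargs["num_beams"]` above
)

trainer = ORTSeq2SeqTrainer(
    model=model,
    args=args,
    train_dataset=tokenized,
    eval_dataset=tokenized,
    tokenizer=tokenizer,
    data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
)
metrics = trainer.evaluate()
# -----------------------------------------------------------------------------------------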
is_accelerate_available() and check_if_transformers_greater('4.38.0'): from transformers.trainer_pt_utils import AcceleratorConfig class ORTOptimizerNames(ExplicitEnum): ADAMW_ORT_FUSED = 'adamw_ort_fused' @dataclass class ORTTrainingArguments(TrainingArguments): optim: Optional[str] = field(default='adamw_hf', metadata={'help': 'The optimizer to use.'}) use_module_with_loss: Optional[bool] = field(default=False, metadata={'help': 'Use ModuleWithLoss Wrapper to compute loss inside the training loop, having this will help save memory for ORTModule Runs.'}) save_onnx: Optional[bool] = field(default=False, metadata={'help': 'Configure ORTModule to save onnx models. Defaults to False. The output directory of the onnx models by default is set to args.output_dir. To change the output directory, the environment variable ORTMODULE_SAVE_ONNX_PATH can be set to the destination directory path.'}) onnx_prefix: Optional[str] = field(default=None, metadata={'help': 'Prefix for the saved ORTModule file names. Must be provided if save_onnx is True.'}) onnx_log_level: Optional[str] = field(default='WARNING', metadata={'help': 'Configure ORTModule log level. Defaults to WARNING. onnx_log_level can also be set to one of VERBOSE, INFO, WARNING, ERROR, FATAL.'}) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) if self.logging_dir is None and self.output_dir is not None: self.logging_dir = os.path.join(self.output_dir, default_logdir()) if self.logging_dir is not None: self.logging_dir = os.path.expanduser(self.logging_dir) if self.disable_tqdm is None: self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN if isinstance(self.eval_strategy, EvaluationStrategy): warnings.warn('using `EvaluationStrategy` for `eval_strategy` is deprecated and will be removed in version 5 of 🤗 Transformers. 
Use `IntervalStrategy` instead', FutureWarning) self.eval_strategy = self.eval_strategy.value self.eval_strategy = IntervalStrategy(self.eval_strategy) self.logging_strategy = IntervalStrategy(self.logging_strategy) self.save_strategy = IntervalStrategy(self.save_strategy) self.hub_strategy = HubStrategy(self.hub_strategy) self.lr_scheduler_type = SchedulerType(self.lr_scheduler_type) if self.do_eval is False and self.eval_strategy != IntervalStrategy.NO: self.do_eval = True if self.eval_strategy == IntervalStrategy.STEPS and (self.eval_steps is None or self.eval_steps == 0): if self.logging_steps > 0: logger.info(f'using `logging_steps` to initialize `eval_steps` to {self.logging_steps}') self.eval_steps = self.logging_steps else: raise ValueError(f'evaluation strategy {self.eval_strategy} requires either non-zero --eval_steps or --logging_steps') if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps == 0: raise ValueError(f'logging strategy {self.logging_strategy} requires non-zero --logging_steps') if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps > 1: if self.logging_steps != int(self.logging_steps): raise ValueError(f'--logging_steps must be an integer if bigger than 1: {self.logging_steps}') self.logging_steps = int(self.logging_steps) if self.eval_strategy == IntervalStrategy.STEPS and self.eval_steps > 1: if self.eval_steps != int(self.eval_steps): raise ValueError(f'--eval_steps must be an integer if bigger than 1: {self.eval_steps}') self.eval_steps = int(self.eval_steps) if self.save_strategy == IntervalStrategy.STEPS and self.save_steps > 1: if self.save_steps != int(self.save_steps): raise ValueError(f'--save_steps must be an integer if bigger than 1: {self.save_steps}') self.save_steps = int(self.save_steps) if self.load_best_model_at_end: if self.eval_strategy != self.save_strategy: raise ValueError(f'--load_best_model_at_end requires the saving steps to be a multiple of the evaluation steps, which cannot get guaranteed when mixing ratio and absolute steps for save_steps {self.save_steps} and eval_steps {self.eval_steps}.') if self.eval_strategy == IntervalStrategy.STEPS and self.save_steps % self.eval_steps != 0: if self.eval_steps < 1 or self.save_steps < 1: if not (self.eval_steps < 1 and self.save_steps < 1): raise ValueError(f'--load_best_model_at_end requires the saving steps to be a multiple of the evaluation steps, which cannot get guaranteed when mixing ratio and absolute steps for save_steps{self.save_steps} and eval_steps {self.eval_steps}.') LARGE_MULTIPLIER = 1000000 if self.save_steps * LARGE_MULTIPLIER % (self.eval_steps * LARGE_MULTIPLIER) != 0: raise ValueError(f'--load_best_model_at_end requires the saving steps to be a multiple of the evaluation steps, but found {self.save_steps}, which is not a multiple of {self.eval_steps}.') raise ValueError(f'--load_best_model_at_end requires the saving steps to be a round multiple of the evaluation steps, but found {self.save_steps}, which is not a round multiple of {self.eval_steps}.') safetensors_available = is_safetensors_available() if self.save_safetensors and (not safetensors_available): raise ValueError(f'--save_safetensors={self.save_safetensors} requires safetensors to be installed!') if not self.save_safetensors and safetensors_available: logger.info(f'Found safetensors installation, but --save_safetensors={self.save_safetensors}. Safetensors should be a preferred weights saving format due to security and performance reasons. 
If your model cannot be saved by safetensors please feel free to open an issue at https://github.com/huggingface/safetensors!') if (self.load_best_model_at_end or self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU) and self.metric_for_best_model is None: self.metric_for_best_model = 'loss' if self.greater_is_better is None and self.metric_for_best_model is not None: self.greater_is_better = self.metric_for_best_model not in ['loss', 'eval_loss'] if self.run_name is None: self.run_name = self.output_dir if self.framework == 'pt' and is_torch_available(): if self.fp16_backend and self.fp16_backend != 'auto': warnings.warn('`fp16_backend` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `half_precision_backend` instead', FutureWarning) self.half_precision_backend = self.fp16_backend if self.bf16 or self.bf16_full_eval: if self.use_cpu and (not is_torch_bf16_cpu_available()): raise ValueError("Your setup doesn't support bf16/(cpu, tpu, neuroncore). You need torch>=1.10") elif not self.use_cpu: if torch.cuda.is_available() and (not is_torch_bf16_gpu_available()): raise ValueError("Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0") if self.fp16 and self.bf16: raise ValueError('At most one of fp16 and bf16 can be True, but not both') if self.fp16_full_eval and self.bf16_full_eval: raise ValueError('At most one of fp16 and bf16 can be True for full eval, but not both') if self.bf16: if self.half_precision_backend == 'apex': raise ValueError(' `--half_precision_backend apex`: GPU bf16 is not supported by apex. Use `--half_precision_backend cuda_amp` instead') if self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU: if self.eval_strategy == IntervalStrategy.NO: raise ValueError('lr_scheduler_type reduce_lr_on_plateau requires an eval strategy') if not is_torch_available(): raise ValueError('lr_scheduler_type reduce_lr_on_plateau requires torch>=0.2.0') try: self.optim = ORTOptimizerNames(self.optim) except ValueError: self.optim = OptimizerNames(self.optim) if self.adafactor: warnings.warn('`--adafactor` is deprecated and will be removed in version 5 of 🤗 Transformers. 
Use `--optim adafactor` instead', FutureWarning) self.optim = OptimizerNames.ADAFACTOR if self.optim == OptimizerNames.ADAMW_TORCH_FUSED and is_torch_available(): if version.parse(version.parse(torch.__version__).base_version) < version.parse('2.0.0'): raise ValueError('--optim adamw_torch_fused requires PyTorch 2.0 or higher') if version.parse(version.parse(torch.__version__).base_version) == version.parse('2.0.0') and self.fp16: raise ValueError('--optim adamw_torch_fused with --fp16 requires PyTorch>2.0') if self.save_onnx: if not self.onnx_prefix: raise ValueError('onnx_prefix must be provided if save_onnx is True') if not os.getenv('ORTMODULE_SAVE_ONNX_PATH', None): os.environ['ORTMODULE_SAVE_ONNX_PATH'] = self.output_dir os.environ['ORTMODULE_LOG_LEVEL'] = self.onnx_log_level if is_torch_available() and self.device.type != 'cuda' and (not (self.device.type == 'xla' and 'GPU_NUM_DEVICES' in os.environ)) and (self.fp16 or self.fp16_full_eval): raise ValueError('FP16 Mixed precision training with AMP or APEX (`--fp16`) and FP16 half precision evaluation (`--fp16_full_eval`) can only be used on CUDA devices.') if is_torch_available() and self.device.type != 'cuda' and (not (self.device.type == 'xla' and 'GPU_NUM_DEVICES' in os.environ)) and (self.device.type != 'cpu') and (self.bf16 or self.bf16_full_eval): raise ValueError('BF16 Mixed precision training with AMP (`--bf16`) and BF16 half precision evaluation (`--bf16_full_eval`) can only be used on CUDA or CPU/TPU/NeuronCore devices.') if self.torchdynamo is not None: warnings.warn('`torchdynamo` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `torch_compile_backend` instead', FutureWarning) self.torch_compile_backend = self.torchdynamo if (self.torch_compile_mode is not None or self.torch_compile_backend is not None) and (not self.torch_compile): self.torch_compile = True if self.torch_compile and self.torch_compile_backend is None: self.torch_compile_backend = 'inductor' if self.torch_compile: prefix = 'ACCELERATE_DYNAMO_' os.environ[prefix + 'BACKEND'] = self.torch_compile_backend if self.torch_compile_mode is not None: os.environ[prefix + 'MODE'] = self.torch_compile_mode if self.framework == 'pt' and is_torch_available() and self.torch_compile: if is_torch_tf32_available(): if self.tf32 is None and (not self.fp16) or self.bf16: logger.info("Setting TF32 in CUDA backends to speedup torch compile, you won't see any improvement otherwise.") torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True else: logger.warning('The speedups for torchdynamo mostly come wih GPU Ampere or higher and which is not detected here.') if is_torch_available() and self.tf32 is not None: if self.tf32: if is_torch_tf32_available(): torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True else: raise ValueError('--tf32 requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7') elif is_torch_tf32_available(): torch.backends.cuda.matmul.allow_tf32 = False torch.backends.cudnn.allow_tf32 = False if self.half_precision_backend != 'apex': mixed_precision_dtype = os.environ.get('ACCELERATE_MIXED_PRECISION', 'no') if self.fp16: mixed_precision_dtype = 'fp16' elif self.bf16: mixed_precision_dtype = 'bf16' os.environ['ACCELERATE_MIXED_PRECISION'] = mixed_precision_dtype if self.report_to is None: logger.info('The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). 
In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).') self.report_to = 'all' if self.report_to == 'all' or self.report_to == ['all']: from transformers.integrations import get_available_reporting_integrations self.report_to = get_available_reporting_integrations() elif self.report_to == 'none' or self.report_to == ['none']: self.report_to = [] elif not isinstance(self.report_to, list): self.report_to = [self.report_to] if self.warmup_ratio < 0 or self.warmup_ratio > 1: raise ValueError('warmup_ratio must lie in range [0,1]') elif self.warmup_ratio > 0 and self.warmup_steps > 0: logger.info('Both warmup_ratio and warmup_steps given, warmup_steps will override any effect of warmup_ratio during training') if isinstance(self.fsdp, bool): self.fsdp = 'full_shard' if self.fsdp else '' if isinstance(self.fsdp, str): self.fsdp = [FSDPOption(s) for s in self.fsdp.split()] if self.fsdp == [FSDPOption.OFFLOAD]: raise ValueError('`--fsdp offload` can\'t work on its own. It needs to be added to `--fsdp full_shard` or `--fsdp shard_grad_op`. For example, `--fsdp "full_shard offload"`.') elif FSDPOption.FULL_SHARD in self.fsdp and FSDPOption.SHARD_GRAD_OP in self.fsdp: raise ValueError('`--fsdp full_shard` is not compatible with `--fsdp shard_grad_op`.') if self.fsdp_config is None: self.fsdp_config = {} if isinstance(self.fsdp_config, str): if len(self.fsdp) == 0: warnings.warn('`--fsdp_config` is useful only when `--fsdp` is specified.') with io.open(self.fsdp_config, 'r', encoding='utf-8') as f: self.fsdp_config = json.load(f) for k in list(self.fsdp_config.keys()): if k.startswith('fsdp_'): v = self.fsdp_config.pop(k) self.fsdp_config[k[5:]] = v if self.fsdp_min_num_params > 0: warnings.warn('using `--fsdp_min_num_params` is deprecated. Use fsdp_config instead ', FutureWarning) self.fsdp_config['min_num_params'] = max(self.fsdp_config.get('min_num_params', 0), self.fsdp_min_num_params) if isinstance(self.fsdp_config.get('transformer_layer_cls_to_wrap', None), str): self.fsdp_config['transformer_layer_cls_to_wrap'] = [self.fsdp_config['transformer_layer_cls_to_wrap']] if self.fsdp_transformer_layer_cls_to_wrap is not None: warnings.warn('using `--fsdp_transformer_layer_cls_to_wrap` is deprecated. 
Use fsdp_config instead ', FutureWarning) self.fsdp_config['transformer_layer_cls_to_wrap'] = self.fsdp_config.get('transformer_layer_cls_to_wrap', []) + [self.fsdp_transformer_layer_cls_to_wrap] if len(self.fsdp) == 0 and self.fsdp_config['min_num_params'] > 0: warnings.warn('`min_num_params` is useful only when `--fsdp` is specified.') if len(self.fsdp) == 0 and self.fsdp_config.get('transformer_layer_cls_to_wrap', None) is not None: warnings.warn('`transformer_layer_cls_to_wrap` is useful only when `--fsdp` is specified.') if len(self.fsdp) > 0 and self.fsdp_config['min_num_params'] > 0 and (self.fsdp_config.get('transformer_layer_cls_to_wrap', None) is not None): raise ValueError('`min_num_params` and `transformer_layer_cls_to_wrap` are mutually exclusive.') self.fsdp_config['xla'] = self.fsdp_config.get('xla', False) self.fsdp_config['xla_fsdp_v2'] = self.fsdp_config.get('xla_fsdp_v2', False) self.fsdp_config['xla_fsdp_grad_ckpt'] = self.fsdp_config.get('xla_fsdp_grad_ckpt', False) if self.fsdp_config['xla']: if len(self.fsdp) > 0: self.xla_fsdp_config = self.fsdp_config.get('xla_fsdp_settings', {}) if 'compute_dtype' in self.xla_fsdp_config: self.xla_fsdp_config['compute_dtype'] = getattr(torch, self.xla_fsdp_config['compute_dtype']) if 'buffer_dtype' in self.xla_fsdp_config: self.xla_fsdp_config['buffer_dtype'] = getattr(torch, self.xla_fsdp_config['buffer_dtype']) else: warnings.warn('XLA FSDP can be used only when `--fsdp` is specified.') elif self.fsdp_config['xla_fsdp_grad_ckpt']: warnings.warn('`--xla_fsdp_grad_ckpt` is useful only when `--xla` is set to true.') if len(self.fsdp) > 0 and (not self.fsdp_config['xla']): os.environ['ACCELERATE_USE_FSDP'] = 'true' from accelerate.utils.constants import FSDP_AUTO_WRAP_POLICY, FSDP_SHARDING_STRATEGY prefix = 'FSDP_' for fsdp_option in self.fsdp: if fsdp_option.upper() in FSDP_SHARDING_STRATEGY: os.environ[f'{prefix}SHARDING_STRATEGY'] = str(FSDP_SHARDING_STRATEGY.index(fsdp_option.upper()) + 1) elif fsdp_option == FSDPOption.OFFLOAD: os.environ[f'{prefix}OFFLOAD_PARAMS'] = 'true' elif fsdp_option == FSDPOption.AUTO_WRAP: os.environ[f'{prefix}AUTO_WRAP_POLICY'] = FSDP_AUTO_WRAP_POLICY[0] if self.fsdp_config['min_num_params'] > 0: os.environ[f'{prefix}MIN_NUM_PARAMS'] = str(self.fsdp_config['min_num_params']) os.environ[f'{prefix}AUTO_WRAP_POLICY'] = FSDP_AUTO_WRAP_POLICY[1] elif self.fsdp_config.get('transformer_layer_cls_to_wrap', None) is not None: os.environ[f'{prefix}TRANSFORMER_CLS_TO_WRAP'] = ','.join(self.fsdp_config['transformer_layer_cls_to_wrap']) prefetch_policy = self.fsdp_config.get('fsdp_backward_prefetch', 'NO_PREFETCH') os.environ[f'{prefix}BACKWARD_PREFETCH'] = prefetch_policy.upper() os.environ[f'{prefix}FORWARD_PREFETCH'] = self.fsdp_config.get('forward_prefect', 'false') os.environ[f'{prefix}SYNC_MODULE_STATES'] = self.fsdp_config.get('sync_module_states', 'true') os.environ[f'{prefix}USE_ORIG_PARAMS'] = self.fsdp_config.get('use_orig_params', 'false') if is_accelerate_available() and check_if_transformers_greater('4.38.0'): if not isinstance(self.accelerator_config, AcceleratorConfig): if self.accelerator_config is None: self.accelerator_config = AcceleratorConfig() elif isinstance(self.accelerator_config, dict): self.accelerator_config = AcceleratorConfig(**self.accelerator_config) else: self.accelerator_config = AcceleratorConfig.from_json_file(self.accelerator_config) if self.dispatch_batches is not None: warnings.warn("Using `--dispatch_batches` is deprecated and will be removed in version 4.41 of 🤗 
Transformers. Use `--accelerator_config {'dispatch_batches':VALUE} instead", FutureWarning) self.accelerator_config.dispatch_batches = self.dispatch_batches if self.split_batches is not None: warnings.warn("Using `--split_batches` is deprecated and will be removed in version 4.41 of 🤗 Transformers. Use `--accelerator_config {'split_batches':VALUE} instead", FutureWarning) self.accelerator_config.split_batches = self.split_batches if self.tpu_metrics_debug: warnings.warn('using `--tpu_metrics_debug` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--debug tpu_metrics_debug` instead', FutureWarning) if self.debug is None: self.debug = ' tpu_metrics_debug' else: self.debug += ' tpu_metrics_debug' self.tpu_metrics_debug = False if isinstance(self.debug, str): self.debug = [DebugOption(s) for s in self.debug.split()] elif self.debug is None: self.debug = [] self.deepspeed_plugin = None if self.deepspeed: if not is_accelerate_available(): raise ValueError('--deepspeed requires Accelerate to be installed: `pip install accelerate`.') from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig self.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.deepspeed) self.hf_deepspeed_config.trainer_config_process(self) from accelerate.utils import DeepSpeedPlugin os.environ['ACCELERATE_USE_DEEPSPEED'] = 'true' self.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.hf_deepspeed_config) elif strtobool(os.environ.get('ACCELERATE_USE_DEEPSPEED', 'false')): from accelerate.utils import DeepSpeedPlugin self.deepspeed_plugin = DeepSpeedPlugin() mixed_precision = os.environ.get('ACCELERATE_MIXED_PRECISION', 'no') self.deepspeed_plugin.set_mixed_precision(mixed_precision) self.deepspeed_plugin.set_deepspeed_weakref() if self.push_to_hub_token is not None: warnings.warn('`--push_to_hub_token` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_token` instead.', FutureWarning) self.hub_token = self.push_to_hub_token if self.push_to_hub_model_id is not None: self.hub_model_id = get_full_repo_name(self.push_to_hub_model_id, organization=self.push_to_hub_organization, token=self.hub_token) if self.push_to_hub_organization is not None: warnings.warn(f'`--push_to_hub_model_id` and `--push_to_hub_organization` are deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_model_id` instead and pass the full repo name to this argument (in this case {self.hub_model_id}).', FutureWarning) else: warnings.warn(f'`--push_to_hub_model_id` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_model_id` instead and pass the full repo name to this argument (in this case {self.hub_model_id}).', FutureWarning) elif self.push_to_hub_organization is not None: self.hub_model_id = f'{self.push_to_hub_organization}/{Path(self.output_dir).name}' warnings.warn(f'`--push_to_hub_organization` is deprecated and will be removed in version 5 of 🤗 Transformers. 
Use `--hub_model_id` instead and pass the full repo name to this argument (in this case {self.hub_model_id}).', FutureWarning) if self.half_precision_backend != 'apex': mixed_precision_dtype = os.environ.get('ACCELERATE_MIXED_PRECISION', 'no') if self.fp16: mixed_precision_dtype = 'fp16' elif self.bf16: mixed_precision_dtype = 'bf16' os.environ['ACCELERATE_MIXED_PRECISION'] = mixed_precision_dtype if self.use_module_with_loss is True: logger.info('Using ModuleWithLoss Wrapper.loss will be computed during training loop and it will save memory peak ') else: logger.info('Not Using ModuleWithLoss Wrapper.') # File: optimum-main/optimum/onnxruntime/training_args_seq2seq.py from dataclasses import dataclass, field from typing import Optional from transformers import Seq2SeqTrainingArguments from .training_args import ORTTrainingArguments @dataclass class ORTSeq2SeqTrainingArguments(Seq2SeqTrainingArguments, ORTTrainingArguments): optim: Optional[str] = field(default='adamw_hf', metadata={'help': 'The optimizer to use.'}) # File: optimum-main/optimum/onnxruntime/utils.py """""" import os import re from enum import Enum from inspect import signature from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from packaging import version from tqdm import tqdm from transformers import EvalPrediction from transformers.trainer_pt_utils import nested_concat from transformers.trainer_utils import EvalLoopOutput from transformers.utils import logging import onnxruntime as ort from ..exporters.onnx import OnnxConfig, OnnxConfigWithLoss from ..utils.import_utils import _is_package_available if TYPE_CHECKING: from datasets import Dataset from .modeling_ort import ORTModel logger = logging.get_logger(__name__) ONNX_WEIGHTS_NAME = 'model.onnx' ONNX_ENCODER_NAME = 'encoder_model.onnx' ONNX_DECODER_NAME = 'decoder_model.onnx' ONNX_DECODER_WITH_PAST_NAME = 'decoder_with_past_model.onnx' ONNX_DECODER_MERGED_NAME = 'decoder_model_merged.onnx' _ORT_TO_NP_TYPE = {'tensor(bool)': np.bool_, 'tensor(int8)': np.int8, 'tensor(uint8)': np.uint8, 'tensor(int16)': np.int16, 'tensor(uint16)': np.uint16, 'tensor(int32)': np.int32, 'tensor(uint32)': np.uint32, 'tensor(int64)': np.int64, 'tensor(uint64)': np.uint64, 'tensor(float16)': np.float16, 'tensor(float)': np.float32, 'tensor(double)': np.float64} def _is_gpu_available(): available_providers = ort.get_available_providers() if ('CUDAExecutionProvider' in available_providers or 'ROCMExecutionProvider' in available_providers) and torch.cuda.is_available(): return True else: return False def is_onnxruntime_training_available(): path_training_dependecy = os.path.join(ort.__path__[0], 'training') if os.path.exists(path_training_dependecy): return True else: return False def is_cupy_available(): return _is_package_available('cupy') class ORTConfigManager: _conf = {'albert': 'bert', 'bart': 'bart', 'bert': 'bert', 'big-bird': 'bert', 'blenderbot': 'bert', 'bloom': 'gpt2', 'camembert': 'bert', 'codegen': 'gpt2', 'deberta': 'bert', 'deberta-v2': 'bert', 'distilbert': 'bert', 'electra': 'bert', 'gpt2': 'gpt2', 'gpt-bigcode': 'gpt2', 'gpt-neo': 'gpt2', 'gpt-neox': 'gpt2', 'gptj': 'gpt2', 'longt5': 'bert', 'llama': 'gpt2', 'marian': 'bart', 'mbart': 'bart', 'mistral': 'gpt2', 'mpnet': 'bert', 'mt5': 'bart', 'm2m-100': 'bart', 'nystromformer': 'bert', 'pegasus': 'bert', 'roberta': 'bert', 'segformer': 'vit', 't5': 'bert', 'vit': 'vit', 'whisper': 'bart', 'xlm-roberta': 'bert', 'pix2struct': 'vit'} @classmethod def 
get_model_ort_type(cls, model_type: str) -> str: model_type = model_type.replace('_', '-') cls.check_supported_model(model_type) return cls._conf[model_type] @classmethod def check_supported_model(cls, model_type: str): if model_type not in cls._conf: model_types = ', '.join(cls._conf.keys()) raise KeyError(f'{model_type} model type is not supported yet. Only {model_types} are supported. If you want to support {model_type} please propose a PR or open up an issue.') @classmethod def check_optimization_supported_model(cls, model_type: str, optimization_config): supported_model_types_for_optimization = ['bart', 'bert', 'gpt2', 'tnlr', 't5', 'unet', 'vae', 'clip', 'vit', 'swin'] model_type = model_type.replace('_', '-') if model_type not in cls._conf or cls._conf[model_type] not in supported_model_types_for_optimization: raise NotImplementedError(f"ONNX Runtime doesn't support the graph optimization of {model_type} yet. Only {list(cls._conf.keys())} are supported. If you want to support {model_type} please propose a PR or open up an issue in ONNX Runtime: https://github.com/microsoft/onnxruntime.") def generate_identified_filename(filename, identifier): return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix) def wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig: return OnnxConfigWithLoss(onnx_config) def get_device_for_provider(provider: str, provider_options: Dict) -> torch.device: if provider in ['CUDAExecutionProvider', 'TensorrtExecutionProvider', 'ROCMExecutionProvider']: return torch.device(f"cuda:{provider_options['device_id']}") else: return torch.device('cpu') def get_provider_for_device(device: torch.device) -> str: if device.type.lower() == 'cuda': if 'ROCMExecutionProvider' in ort.get_available_providers(): return 'ROCMExecutionProvider' else: return 'CUDAExecutionProvider' return 'CPUExecutionProvider' def parse_device(device: Union[torch.device, str, int]) -> Tuple[torch.device, Dict]: if device == -1: device = torch.device('cpu') else: device = torch._C._nn._parse_to(device)[0] provider_options = {} if device.type == 'cuda': if device.index is None: device = torch.device('cuda:0') provider_options['device_id'] = device.index return (device, provider_options) def validate_provider_availability(provider: str): if version.parse(ort.__version__) < version.parse('1.16.0') and os.name != 'nt' and (provider in ['CUDAExecutionProvider', 'TensorrtExecutionProvider']): path_cuda_lib = os.path.join(ort.__path__[0], 'capi', 'libonnxruntime_providers_cuda.so') path_trt_lib = os.path.join(ort.__path__[0], 'capi', 'libonnxruntime_providers_tensorrt.so') path_dependecy_loading = os.path.join(ort.__path__[0], 'capi', '_ld_preload.py') with open(path_dependecy_loading, 'r') as f: file_string = f.read() if 'ORT_CUDA' not in file_string or 'ORT_TENSORRT' not in file_string: if os.path.isfile(path_cuda_lib) and os.path.isfile(path_trt_lib): raise ImportError(f'`onnxruntime-gpu` is installed, but GPU dependencies are not loaded. It is likely there is a conflicting install between `onnxruntime` and `onnxruntime-gpu`. Please install only `onnxruntime-gpu` in order to use {provider}.') elif os.path.isfile(path_cuda_lib) and is_onnxruntime_training_available(): if provider == 'TensorrtExecutionProvider': raise ImportError(f"Asked to use {provider}, but `onnxruntime-training` package doesn't support {provider}. Please use `CUDAExecutionProvider` instead.") else: raise ImportError(f'Asked to use {provider}, but `onnxruntime-gpu` package was not found. 
Make sure to install `onnxruntime-gpu` package instead of `onnxruntime`.') if provider == 'CUDAExecutionProvider': if os.environ.get('ORT_CUDA_UNAVAILABLE', '0') == '1': raise ImportError('`onnxruntime-gpu` package is installed, but CUDA requirements could not be loaded. Make sure to meet the required dependencies: https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html') if provider == 'TensorrtExecutionProvider': if os.environ.get('ORT_TENSORRT_UNAVAILABLE', '0') == '1': raise ImportError('`onnxruntime-gpu` package is installed, but TensorRT requirements could not be loaded. Make sure to meet the required dependencies following https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html and https://hf.co/docs/optimum/onnxruntime/usage_guides/gpu#tensorrtexecutionprovider .') available_providers = ort.get_available_providers() if provider not in available_providers: raise ValueError(f'Asked to use {provider} as an ONNX Runtime execution provider, but the available execution providers are {available_providers}.') def check_io_binding(providers: List[str], use_io_binding: Optional[bool]=None) -> bool: if use_io_binding is None and providers[0] == 'CUDAExecutionProvider': use_io_binding = True elif providers[0] != 'CPUExecutionProvider' and providers[0] != 'CUDAExecutionProvider': if use_io_binding is True: logger.warning('No need to enable IO Binding if the provider used is neither CPUExecutionProvider nor CUDAExecutionProvider. IO Binding will be turned off.') use_io_binding = False return use_io_binding def get_ordered_input_names(input_names: List[str], func: Callable) -> List[str]: signature_func = signature(func) _ordered_input_names = [] for param in signature_func.parameters: param_regex = re.compile(f'{param}(\\.\\d*)?') for name in input_names: if re.search(param_regex, name): _ordered_input_names.append(name) return _ordered_input_names class ORTQuantizableOperator(Enum): Gather = 'Gather' Transpose = 'Transpose' EmbedLayerNormalizationQuant = 'EmbedLayerNormalization' Conv = 'Conv' MatMul = 'MatMul' Add = 'Add' Mul = 'Mul' Relu = 'Relu' Clip = 'Clip' LeakyRelu = 'LeakyRelu' Sigmoid = 'Sigmoid' MaxPool = 'MaxPool' GlobalAveragePool = 'GlobalAveragePool' Split = 'Split' Pad = 'Pad' Reshape = 'Reshape' Squeeze = 'Squeeze' Unsqueeze = 'Unsqueeze' Resize = 'Resize' AveragePool = 'AveragePool' Concat = 'Concat' def evaluation_loop(model: 'ORTModel', dataset: 'Dataset', label_names: Optional[List[str]]=None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]]=None): all_preds = None all_labels = None for inputs in tqdm(dataset, desc='Evaluation'): has_labels = all((inputs.get(k) is not None for k in label_names)) if has_labels: labels = tuple((np.array([inputs.get(name)]) for name in label_names)) if len(labels) == 1: labels = labels[0] else: labels = None inputs = {key: np.array([inputs[key]]) for key in model.input_names if key in inputs} preds = model(**inputs) if len(preds) == 1: preds = preds[0] all_preds = preds if all_preds is None else nested_concat(all_preds, preds, padding_index=-100) all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) if compute_metrics is not None and all_preds is not None and (all_labels is not None): metrics = compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels)) else: metrics = {} return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=len(dataset)) # File: 
optimum-main/optimum/pipelines/diffusers/pipeline_latent_consistency.py import logging from typing import Callable, List, Optional, Union import numpy as np import torch from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from .pipeline_stable_diffusion import StableDiffusionPipelineMixin logger = logging.getLogger(__name__) class LatentConsistencyPipelineMixin(StableDiffusionPipelineMixin): def __call__(self, prompt: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=4, original_inference_steps: int=None, guidance_scale: float=8.5, num_images_per_prompt: int=1, generator: Optional[np.random.RandomState]=None, latents: Optional[np.ndarray]=None, prompt_embeds: Optional[np.ndarray]=None, output_type: str='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1): height = height or self.unet.config['sample_size'] * self.vae_scale_factor width = width or self.unet.config['sample_size'] * self.vae_scale_factor negative_prompt = None negative_prompt_embeds = None self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, False, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) self.scheduler.set_timesteps(num_inference_steps, original_inference_steps=original_inference_steps) timesteps = self.scheduler.timesteps latents = self.prepare_latents(batch_size * num_images_per_prompt, self.unet.config['in_channels'], height, width, prompt_embeds.dtype, generator, latents) bs = batch_size * num_images_per_prompt w = np.full(bs, guidance_scale - 1, dtype=prompt_embeds.dtype) w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config['time_cond_proj_dim'], dtype=prompt_embeds.dtype) timestep_dtype = self.unet.input_dtype.get('timestep', np.float32) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order for (i, t) in enumerate(self.progress_bar(timesteps)): timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latents, timestep=timestep, encoder_hidden_states=prompt_embeds, timestep_cond=w_embedding)[0] (latents, denoised) = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), return_dict=False) (latents, denoised) = (latents.numpy(), denoised.numpy()) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): if callback is not None and i % callback_steps == 0: callback(i, t, latents) if output_type == 'latent': image = denoised has_nsfw_concept = None else: denoised /= self.vae_decoder.config['scaling_factor'] image = np.concatenate([self.vae_decoder(latent_sample=denoised[i:i + 1])[0] for i in range(denoised.shape[0])]) (image, has_nsfw_concept) = self.run_safety_checker(image) if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def 
get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=None): w = w * 1000 half_dim = embedding_dim // 2 emb = np.log(10000.0) / (half_dim - 1) emb = np.exp(np.arange(half_dim, dtype=dtype) * -emb) emb = w[:, None] * emb[None, :] emb = np.concatenate([np.sin(emb), np.cos(emb)], axis=1) if embedding_dim % 2 == 1: emb = np.pad(emb, [(0, 0), (0, 1)]) assert emb.shape == (w.shape[0], embedding_dim) return emb # File: optimum-main/optimum/pipelines/diffusers/pipeline_stable_diffusion.py import inspect import logging from typing import Callable, List, Optional, Union import numpy as np import torch from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from .pipeline_utils import DiffusionPipelineMixin, rescale_noise_cfg logger = logging.getLogger(__name__) class StableDiffusionPipelineMixin(DiffusionPipelineMixin): def _encode_prompt(self, prompt: Union[str, List[str]], num_images_per_prompt: int, do_classifier_free_guidance: bool, negative_prompt: Optional[Union[str, list]], prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='max_length', return_tensors='np').input_ids if not np.array_equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] * batch_size elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='np') negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def check_inputs(self, prompt: Union[str, List[str]], height: Optional[int], width: Optional[int], callback_steps: int, negative_prompt: Optional[str]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = generator.randn(*shape).astype(dtype) elif latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents * np.float64(self.scheduler.init_noise_sigma) return latents def __call__(self, prompt: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, eta: float=0.0, generator: Optional[np.random.RandomState]=None, latents: Optional[np.ndarray]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, output_type: str='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1, guidance_rescale: float=0.0): height = height or self.unet.config.get('sample_size', 64) * self.vae_scale_factor width = width or self.unet.config.get('sample_size', 64) * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps latents = self.prepare_latents(batch_size * num_images_per_prompt, self.unet.config.get('in_channels', 4), height, width, prompt_embeds.dtype, generator, latents) accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta timestep_dtype = self.unet.input_dtype.get('timestep', np.float32) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.cpu().numpy() timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds) noise_pred = noise_pred[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) scheduler_output = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs) latents = scheduler_output.prev_sample.numpy() if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): if callback is not None and i % callback_steps == 0: callback(i, t, latents) if output_type == 'latent': image = latents has_nsfw_concept = None else: latents /= self.vae_decoder.config.get('scaling_factor', 0.18215) image = np.concatenate([self.vae_decoder(latent_sample=latents[i:i + 1])[0] for i in 
range(latents.shape[0])]) (image, has_nsfw_concept) = self.run_safety_checker(image) if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def run_safety_checker(self, image: np.ndarray): if self.safety_checker is None: has_nsfw_concept = None else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='np').pixel_values.astype(image.dtype) (images, has_nsfw_concept) = ([], []) for i in range(image.shape[0]): (image_i, has_nsfw_concept_i) = self.safety_checker(clip_input=safety_checker_input[i:i + 1], images=image[i:i + 1]) images.append(image_i) has_nsfw_concept.append(has_nsfw_concept_i[0]) image = np.concatenate(images) return (image, has_nsfw_concept) # File: optimum-main/optimum/pipelines/diffusers/pipeline_stable_diffusion_img2img.py import inspect from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import deprecate from .pipeline_stable_diffusion import StableDiffusionPipelineMixin class StableDiffusionImg2ImgPipelineMixin(StableDiffusionPipelineMixin): def check_inputs(self, prompt: Union[str, List[str]], strength: float, callback_steps: int, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def __call__(self, prompt: Optional[Union[str, List[str]]]=None, image: Union[np.ndarray, PIL.Image.Image]=None, strength: float=0.8, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, eta: float=0.0, generator: Optional[np.random.RandomState]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, output_type: str='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1): self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random self.scheduler.set_timesteps(num_inference_steps) image = self.image_processor.preprocess(image) do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) latents_dtype = prompt_embeds.dtype image = image.astype(latents_dtype) init_latents = self.vae_encoder(sample=image)[0] scaling_factor = self.vae_decoder.config.get('scaling_factor', 0.18215) init_latents = scaling_factor * init_latents if isinstance(prompt, str): prompt = [prompt] if len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] == 0: deprecation_message = f'You have passed {len(prompt)} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' 
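# --- Added editorial note (comments only, not part of the original source) ---
# The `strength` handling just below maps the img2img strength onto the
# scheduler's timestep grid. With the default steps_offset of 0, for example
# num_inference_steps=50 and strength=0.8 give:
#   init_timestep = int(50 * 0.8) + 0 = 40   (how much of the schedule is re-run)
#   t_start       = max(50 - 40 + 0, 0) = 10 (index of the first timestep used)
# so the initial latents are noised to scheduler.timesteps[10] and then denoised
# over the remaining 40 steps; strength=1.0 re-runs the full schedule.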
deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = len(prompt) // init_latents.shape[0] init_latents = np.concatenate([init_latents] * additional_image_per_prompt * num_images_per_prompt, axis=0) elif len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {len(prompt)} text prompts.') else: init_latents = np.concatenate([init_latents] * num_images_per_prompt, axis=0) offset = self.scheduler.config.get('steps_offset', 0) init_timestep = int(num_inference_steps * strength) + offset init_timestep = min(init_timestep, num_inference_steps) timesteps = self.scheduler.timesteps.numpy()[-init_timestep] timesteps = np.array([timesteps] * batch_size * num_images_per_prompt) noise = generator.randn(*init_latents.shape).astype(latents_dtype) init_latents = self.scheduler.add_noise(torch.from_numpy(init_latents), torch.from_numpy(noise), torch.from_numpy(timesteps)) init_latents = init_latents.numpy() accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta latents = init_latents t_start = max(num_inference_steps - init_timestep + offset, 0) timesteps = self.scheduler.timesteps[t_start:].numpy() timestep_dtype = self.unet.input_dtype.get('timestep', np.float32) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.cpu().numpy() timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) scheduler_output = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs) latents = scheduler_output.prev_sample.numpy() if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): if callback is not None and i % callback_steps == 0: callback(i, t, latents) if output_type == 'latent': image = latents has_nsfw_concept = None else: latents /= scaling_factor image = np.concatenate([self.vae_decoder(latent_sample=latents[i:i + 1])[0] for i in range(latents.shape[0])]) (image, has_nsfw_concept) = self.run_safety_checker(image) if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: optimum-main/optimum/pipelines/diffusers/pipeline_stable_diffusion_inpaint.py import inspect from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import PIL_INTERPOLATION from .pipeline_stable_diffusion import StableDiffusionPipelineMixin 
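# --- Added editorial note (comments only, not part of the original source) ---
# prepare_mask_and_masked_image (defined next) turns a PIL image/mask pair into
# the two inputs the inpainting UNet expects:
#   * masked_image: the RGB image scaled to [-1, 1] with the region to inpaint
#     zeroed out (pixels whose full-resolution mask value is >= 127.5);
#   * mask: the mask resized to latent resolution and binarized at 0.5, so 1
#     marks the region to regenerate and 0 the region to keep.
# Example: a mask pixel of 200 -> 200 / 255 ~= 0.78 >= 0.5 -> 1 (inpaint), while
# a mask pixel of 30 -> ~0.12 < 0.5 -> 0 (keep the original content there).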
def prepare_mask_and_masked_image(image, mask, latents_shape, vae_scale_factor): image = np.array(image.convert('RGB').resize((latents_shape[1] * vae_scale_factor, latents_shape[0] * vae_scale_factor))) image = image[None].transpose(0, 3, 1, 2) image = image.astype(np.float32) / 127.5 - 1.0 image_mask = np.array(mask.convert('L').resize((latents_shape[1] * vae_scale_factor, latents_shape[0] * vae_scale_factor))) masked_image = image * (image_mask < 127.5) mask = mask.resize((latents_shape[1], latents_shape[0]), PIL_INTERPOLATION['nearest']) mask = np.array(mask.convert('L')) mask = mask.astype(np.float32) / 255.0 mask = mask[None, None] mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 return (mask, masked_image) class StableDiffusionInpaintPipelineMixin(StableDiffusionPipelineMixin): def check_inputs(self, prompt: Union[str, List[str]], height: Optional[int], width: Optional[int], callback_steps: int, negative_prompt: Optional[str]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], image: PIL.Image.Image, mask_image: PIL.Image.Image, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, eta: float=0.0, generator: Optional[np.random.RandomState]=None, latents: Optional[np.ndarray]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, output_type: str='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1): height = height or self.unet.config.get('sample_size', 64) * self.vae_scale_factor width = width or self.unet.config.get('sample_size', 64) * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) num_channels_latents = self.vae_decoder.config.get('latent_channels', 4) num_channels_unet = self.unet.config.get('in_channels', 9) latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) latents_dtype = prompt_embeds.dtype if latents is None: latents = generator.randn(*latents_shape).astype(latents_dtype) elif latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') (mask, masked_image) = prepare_mask_and_masked_image(image, mask_image, latents_shape[-2:], self.vae_scale_factor) mask = mask.astype(latents.dtype) masked_image = masked_image.astype(latents.dtype) masked_image_latents = self.vae_encoder(sample=masked_image)[0] scaling_factor = self.vae_decoder.config.get('scaling_factor', 0.18215) masked_image_latents = scaling_factor * masked_image_latents mask = mask.repeat(batch_size * num_images_per_prompt, 0) masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 0) mask = np.concatenate([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = np.concatenate([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents if num_channels_unet == 9: num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet: raise ValueError(f'Incorrect configuration settings! 
The config of `pipeline.unet`: expects {num_channels_unet} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of `pipeline.unet` or your `mask_image` or `image` input.') elif num_channels_unet != 4: raise ValueError(f'The unet {self.unet.__class__} should have either 4 or 9 input channels, not {num_channels_unet}.') latents = latents * np.float64(self.scheduler.init_noise_sigma) accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta timestep_dtype = self.unet.input_dtype.get('timestep', np.float32) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.cpu().numpy() if num_channels_unet == 9: latent_model_input = np.concatenate([latent_model_input, mask, masked_image_latents], axis=1) timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) scheduler_output = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs) latents = scheduler_output.prev_sample.numpy() if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): if callback is not None and i % callback_steps == 0: callback(i, t, latents) if output_type == 'latent': image = latents has_nsfw_concept = None else: latents /= scaling_factor image = np.concatenate([self.vae_decoder(latent_sample=latents[i:i + 1])[0] for i in range(latents.shape[0])]) (image, has_nsfw_concept) = self.run_safety_checker(image) if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: optimum-main/optimum/pipelines/diffusers/pipeline_stable_diffusion_xl.py import inspect import logging from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput from .pipeline_utils import DiffusionPipelineMixin, rescale_noise_cfg logger = logging.getLogger(__name__) class StableDiffusionXLPipelineMixin(DiffusionPipelineMixin): def _encode_prompt(self, prompt: Union[str, List[str]], num_images_per_prompt: int, do_classifier_free_guidance: bool, negative_prompt: Optional[Union[str, list]], prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, pooled_prompt_embeds: Optional[np.ndarray]=None, negative_pooled_prompt_embeds: Optional[np.ndarray]=None): if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, 
list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='np') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='np').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not np.array_equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(input_ids=text_input_ids.astype(text_encoder.input_dtype.get('input_ids', np.int32))) pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds[-2] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) prompt_embeds_list.append(prompt_embeds) prompt_embeds = np.concatenate(prompt_embeds_list, axis=-1) zero_out_negative_prompt = negative_prompt is None and self.config['force_zeros_for_empty_prompt'] if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = np.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = np.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt negative_prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): max_length = prompt_embeds.shape[1] uncond_input = tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='np') negative_prompt_embeds = text_encoder(input_ids=uncond_input.input_ids.astype(text_encoder.input_dtype.get('input_ids', np.int32))) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds[-2] negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = np.concatenate(negative_prompt_embeds_list, axis=-1) pooled_prompt_embeds = np.repeat(pooled_prompt_embeds, num_images_per_prompt, axis=0) negative_pooled_prompt_embeds = np.repeat(negative_pooled_prompt_embeds, num_images_per_prompt, axis=0) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def check_inputs(self, prompt: Union[str, List[str]], height: Optional[int], width: Optional[int], callback_steps: int, negative_prompt: Optional[str]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, pooled_prompt_embeds: Optional[np.ndarray]=None, negative_pooled_prompt_embeds: Optional[np.ndarray]=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. 
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = generator.randn(*shape).astype(dtype) elif latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents * np.float64(self.scheduler.init_noise_sigma) return latents def prepare_extra_step_kwargs(self, generator, eta): extra_step_kwargs = {} accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_eta: extra_step_kwargs['eta'] = eta return extra_step_kwargs def __call__(self, prompt: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, eta: float=0.0, generator: Optional[np.random.RandomState]=None, latents: Optional[np.ndarray]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, pooled_prompt_embeds: Optional[np.ndarray]=None, negative_pooled_prompt_embeds: Optional[np.ndarray]=None, output_type: str='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None): height = height or self.unet.config['sample_size'] * self.vae_scale_factor width = width or self.unet.config['sample_size'] * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds) self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps latents = self.prepare_latents(batch_size * num_images_per_prompt, self.unet.config.get('in_channels', 4), height, width, prompt_embeds.dtype, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) add_text_embeds = pooled_prompt_embeds add_time_ids = (original_size + crops_coords_top_left + target_size,) add_time_ids = np.array(add_time_ids, dtype=prompt_embeds.dtype) if 
do_classifier_free_guidance: prompt_embeds = np.concatenate((negative_prompt_embeds, prompt_embeds), axis=0) add_text_embeds = np.concatenate((negative_pooled_prompt_embeds, add_text_embeds), axis=0) add_time_ids = np.concatenate((add_time_ids, add_time_ids), axis=0) add_time_ids = np.repeat(add_time_ids, batch_size * num_images_per_prompt, axis=0) timestep_dtype = self.unet.input_dtype.get('timestep', np.float32) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.cpu().numpy() timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, text_embeds=add_text_embeds, time_ids=add_time_ids) noise_pred = noise_pred[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) scheduler_output = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs) latents = scheduler_output.prev_sample.numpy() if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): if callback is not None and i % callback_steps == 0: callback(i, t, latents) if output_type == 'latent': image = latents else: latents /= self.vae_decoder.config.get('scaling_factor', 0.18215) image = np.concatenate([self.vae_decoder(latent_sample=latents[i:i + 1])[0] for i in range(latents.shape[0])]) if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: optimum-main/optimum/pipelines/diffusers/pipeline_stable_diffusion_xl_img2img.py import inspect import logging from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL import torch from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput from .pipeline_utils import DiffusionPipelineMixin, rescale_noise_cfg logger = logging.getLogger(__name__) class StableDiffusionXLImg2ImgPipelineMixin(DiffusionPipelineMixin): def _encode_prompt(self, prompt: Union[str, List[str]], num_images_per_prompt: int, do_classifier_free_guidance: bool, negative_prompt: Optional[Union[str, list]], prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, pooled_prompt_embeds: Optional[np.ndarray]=None, negative_pooled_prompt_embeds: Optional[np.ndarray]=None): if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): text_inputs = tokenizer(prompt, padding='max_length', 
max_length=tokenizer.model_max_length, truncation=True, return_tensors='np') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='np').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not np.array_equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(input_ids=text_input_ids.astype(text_encoder.input_dtype.get('input_ids', np.int32))) pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds[-2] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) prompt_embeds_list.append(prompt_embeds) prompt_embeds = np.concatenate(prompt_embeds_list, axis=-1) zero_out_negative_prompt = negative_prompt is None and self.config['force_zeros_for_empty_prompt'] if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = np.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = np.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt negative_prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): max_length = prompt_embeds.shape[1] uncond_input = tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='np') negative_prompt_embeds = text_encoder(input_ids=uncond_input.input_ids.astype(text_encoder.input_dtype.get('input_ids', np.int32))) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds[-2] negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = np.concatenate(negative_prompt_embeds_list, axis=-1) pooled_prompt_embeds = np.repeat(pooled_prompt_embeds, num_images_per_prompt, axis=0) negative_pooled_prompt_embeds = np.repeat(negative_pooled_prompt_embeds, num_images_per_prompt, axis=0) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def check_inputs(self, prompt: Union[str, List[str]], strength: float, callback_steps: int, negative_prompt: Optional[str]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def get_timesteps(self, num_inference_steps, strength): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:].numpy() return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, generator=None): batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: init_latents = self.vae_encoder(sample=image)[0] * self.vae_decoder.config.get('scaling_factor', 0.18215) if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = np.concatenate([init_latents] * additional_image_per_prompt, axis=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = np.concatenate([init_latents], axis=0) noise = generator.randn(*init_latents.shape).astype(dtype) init_latents = self.scheduler.add_noise(torch.from_numpy(init_latents), torch.from_numpy(noise), torch.from_numpy(timestep)) return init_latents.numpy() def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, dtype): if self.config.get('requires_aesthetics_score'): add_time_ids = (original_size + crops_coords_top_left + (aesthetic_score,),) add_neg_time_ids = (original_size + crops_coords_top_left + (negative_aesthetic_score,),) else: add_time_ids = (original_size + crops_coords_top_left + target_size,) add_neg_time_ids = (original_size + crops_coords_top_left + target_size,) add_time_ids = np.array(add_time_ids, dtype=dtype) add_neg_time_ids = np.array(add_neg_time_ids, dtype=dtype) return (add_time_ids, add_neg_time_ids) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, image: Union[np.ndarray, PIL.Image.Image]=None, strength: float=0.3, num_inference_steps: int=50, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, eta: float=0.0, generator: Optional[np.random.RandomState]=None, latents: Optional[np.ndarray]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, pooled_prompt_embeds: Optional[np.ndarray]=None, negative_pooled_prompt_embeds: Optional[np.ndarray]=None, output_type: str='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, aesthetic_score: float=6.0, negative_aesthetic_score: float=2.5): self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): 
batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds) image = self.image_processor.preprocess(image) self.scheduler.set_timesteps(num_inference_steps) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength) latent_timestep = np.repeat(timesteps[:1], batch_size * num_images_per_prompt, axis=0) timestep_dtype = self.unet.input_dtype.get('timestep', np.float32) latents_dtype = prompt_embeds.dtype image = image.astype(latents_dtype) latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, latents_dtype, generator) extra_step_kwargs = {} accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_eta: extra_step_kwargs['eta'] = eta (height, width) = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) add_text_embeds = pooled_prompt_embeds (add_time_ids, add_neg_time_ids) = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, dtype=prompt_embeds.dtype) if do_classifier_free_guidance: prompt_embeds = np.concatenate((negative_prompt_embeds, prompt_embeds), axis=0) add_text_embeds = np.concatenate((negative_pooled_prompt_embeds, add_text_embeds), axis=0) add_time_ids = np.concatenate((add_time_ids, add_time_ids), axis=0) add_time_ids = np.repeat(add_time_ids, batch_size * num_images_per_prompt, axis=0) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.cpu().numpy() timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, text_embeds=add_text_embeds, time_ids=add_time_ids) noise_pred = noise_pred[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) scheduler_output = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs) latents = scheduler_output.prev_sample.numpy() if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): if callback is not None and i % callback_steps == 0: callback(i, t, latents) if output_type == 'latent': image = latents else: latents /= self.vae_decoder.config.get('scaling_factor', 0.18215) image = np.concatenate([self.vae_decoder(latent_sample=latents[i:i + 1])[0] for i in range(latents.shape[0])]) if self.watermark is not None: image = self.watermark.apply_watermark(image) 
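# In the denoising loop above, classifier-free guidance recombines the batched UNet output as noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond); when guidance_rescale > 0, rescale_noise_cfg (defined in pipeline_utils.py below) matches the standard deviation of the guided prediction to that of the text-conditioned branch to limit over-exposure at high guidance scales.
# Unless output_type == 'latent', the latents are divided by the VAE scaling factor (0.18215 by default), decoded sample-by-sample through the ONNX VAE decoder, optionally watermarked, and then converted below to the requested output format ('pil' or 'np') by the image processor.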
image = self.image_processor.postprocess(image, output_type=output_type) if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: optimum-main/optimum/pipelines/diffusers/pipeline_utils.py import warnings from typing import List, Optional, Union import numpy as np import PIL import torch from diffusers import ConfigMixin from diffusers.image_processor import VaeImageProcessor as DiffusersVaeImageProcessor from diffusers.utils.pil_utils import PIL_INTERPOLATION from PIL import Image from tqdm.auto import tqdm class DiffusionPipelineMixin(ConfigMixin): @staticmethod def numpy_to_pil(images): if images.ndim == 3: images = images[None, ...] images = (images * 255).round().astype('uint8') if images.shape[-1] == 1: pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images] else: pil_images = [Image.fromarray(image) for image in images] return pil_images def progress_bar(self, iterable=None, total=None): if not hasattr(self, '_progress_bar_config'): self._progress_bar_config = {} elif not isinstance(self._progress_bar_config, dict): raise ValueError(f'`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}.') if iterable is not None: return tqdm(iterable, **self._progress_bar_config) elif total is not None: return tqdm(total=total, **self._progress_bar_config) else: raise ValueError('Either `total` or `iterable` has to be defined.') def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = np.std(noise_pred_text, axis=tuple(range(1, noise_pred_text.ndim)), keepdims=True) std_cfg = np.std(noise_cfg, axis=tuple(range(1, noise_cfg.ndim)), keepdims=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class VaeImageProcessor(DiffusersVaeImageProcessor): @staticmethod def denormalize(images: np.ndarray): return np.clip(images / 2 + 0.5, 0, 1) def preprocess(self, image: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray], height: Optional[int]=None, width: Optional[int]=None) -> np.ndarray: supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor) do_convert_grayscale = getattr(self.config, 'do_convert_grayscale', False) if do_convert_grayscale and isinstance(image, (torch.Tensor, np.ndarray)) and (image.ndim == 3): if isinstance(image, torch.Tensor): image = image.unsqueeze(1) elif image.shape[-1] == 1: image = np.expand_dims(image, axis=0) else: image = np.expand_dims(image, axis=-1) if isinstance(image, supported_formats): image = [image] elif not (isinstance(image, list) and all((isinstance(i, supported_formats) for i in image))): raise ValueError(f"Input is in incorrect format: {[type(i) for i in image]}. 
Currently, we only support {', '.join(supported_formats)}") if isinstance(image[0], PIL.Image.Image): if self.config.do_convert_rgb: image = [self.convert_to_rgb(i) for i in image] elif do_convert_grayscale: image = [self.convert_to_grayscale(i) for i in image] if self.config.do_resize: (height, width) = self.get_height_width(image[0], height, width) image = [self.resize(i, height, width) for i in image] image = self.reshape(self.pil_to_numpy(image)) else: if isinstance(image[0], torch.Tensor): image = [self.pt_to_numpy(elem) for elem in image] image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) else: image = self.reshape(np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)) if do_convert_grayscale and image.ndim == 3: image = np.expand_dims(image, 1) if image.shape[1] == 4: return image if self.config.do_resize: (height, width) = self.get_height_width(image, height, width) image = self.resize(image, height, width) do_normalize = self.config.do_normalize if image.min() < 0 and do_normalize: warnings.warn(f'Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] when passing as pytorch tensor or numpy Array. You passed `image` with value range [{image.min()},{image.max()}]', FutureWarning) do_normalize = False if do_normalize: image = self.normalize(image) if getattr(self.config, 'do_binarize', False): image = self.binarize(image) return image def postprocess(self, image: np.ndarray, output_type: str='pil', do_denormalize: Optional[List[bool]]=None): if not isinstance(image, np.ndarray): raise ValueError(f'Input for postprocessing is in incorrect format: {type(image)}. We only support np array') if output_type not in ['latent', 'np', 'pil']: deprecation_message = f'the output_type {output_type} is outdated and has been set to `np`. 
Please make sure to set it to one of these instead: `pil`, `np`, `pt`, `latent`' warnings.warn(deprecation_message, FutureWarning) output_type = 'np' if output_type == 'latent': return image if do_denormalize is None: do_denormalize = [self.config.do_normalize] * image.shape[0] image = np.stack([self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])], axis=0) image = image.transpose((0, 2, 3, 1)) if output_type == 'pil': image = self.numpy_to_pil(image) return image def get_height_width(self, image: [PIL.Image.Image, np.ndarray], height: Optional[int]=None, width: Optional[int]=None): height = height or (image.height if isinstance(image, PIL.Image.Image) else image.shape[-2]) width = width or (image.width if isinstance(image, PIL.Image.Image) else image.shape[-1]) (width, height) = (x - x % self.config.vae_scale_factor for x in (width, height)) return (height, width) @staticmethod def numpy_to_pt(images: np.ndarray) -> torch.FloatTensor: if images.ndim == 3: images = images[..., None] images = torch.from_numpy(images) return images @staticmethod def pt_to_numpy(images: torch.FloatTensor) -> np.ndarray: images = images.cpu().float().numpy() return images @staticmethod def reshape(images: np.ndarray) -> np.ndarray: if images.ndim == 3: images = images[..., None] return images.transpose(0, 3, 1, 2) def resize(self, image: [PIL.Image.Image, np.ndarray, torch.Tensor], height: Optional[int]=None, width: Optional[int]=None) -> [PIL.Image.Image, np.ndarray, torch.Tensor]: if isinstance(image, PIL.Image.Image): image = image.resize((width, height), resample=PIL_INTERPOLATION[self.config.resample]) elif isinstance(image, torch.Tensor): image = torch.nn.functional.interpolate(image, size=(height, width)) elif isinstance(image, np.ndarray): image = self.numpy_to_pt(image) image = torch.nn.functional.interpolate(image, size=(height, width)) image = self.pt_to_numpy(image) return image # File: optimum-main/optimum/pipelines/diffusers/watermark.py import numpy as np from imwatermark import WatermarkEncoder WATERMARK_MESSAGE = 197828617679262 WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class StableDiffusionXLWatermarker: def __init__(self): self.watermark = WATERMARK_BITS self.encoder = WatermarkEncoder() self.encoder.set_watermark('bits', self.watermark) def apply_watermark(self, images: np.array): if images.shape[-1] < 256: return images if images.dtype == np.float16: images = images.astype(np.float32) images = (255 * (images / 2 + 0.5)).transpose((0, 2, 3, 1)) images = np.array([self.encoder.encode(image, 'dwtDct') for image in images]).transpose((0, 3, 1, 2)) np.clip(2 * (images / 255 - 0.5), -1.0, 1.0, out=images) return images # File: optimum-main/optimum/pipelines/pipelines_base.py """""" from typing import Any, Dict, Optional, Union from transformers import AudioClassificationPipeline, AutoConfig, AutomaticSpeechRecognitionPipeline, FeatureExtractionPipeline, FillMaskPipeline, ImageClassificationPipeline, ImageSegmentationPipeline, ImageToTextPipeline, Pipeline, PreTrainedTokenizer, PreTrainedTokenizerFast, QuestionAnsweringPipeline, SequenceFeatureExtractor, SummarizationPipeline, Text2TextGenerationPipeline, TextClassificationPipeline, TextGenerationPipeline, TokenClassificationPipeline, TranslationPipeline, ZeroShotClassificationPipeline from transformers import pipeline as transformers_pipeline from transformers.feature_extraction_utils import PreTrainedFeatureExtractor from transformers.onnx.utils import get_preprocessor from 
transformers.pipelines import SUPPORTED_TASKS as TRANSFORMERS_SUPPORTED_TASKS from transformers.pipelines import infer_framework_load_model from ..bettertransformer import BetterTransformer from ..utils import check_if_transformers_greater, is_onnxruntime_available from ..utils.file_utils import find_files_matching_pattern if is_onnxruntime_available(): from ..onnxruntime import ORTModelForAudioClassification, ORTModelForCausalLM, ORTModelForFeatureExtraction, ORTModelForImageClassification, ORTModelForMaskedLM, ORTModelForQuestionAnswering, ORTModelForSemanticSegmentation, ORTModelForSeq2SeqLM, ORTModelForSequenceClassification, ORTModelForSpeechSeq2Seq, ORTModelForTokenClassification, ORTModelForVision2Seq from ..onnxruntime.modeling_ort import ORTModel ORT_SUPPORTED_TASKS = {'feature-extraction': {'impl': FeatureExtractionPipeline, 'class': (ORTModelForFeatureExtraction,), 'default': 'distilbert-base-cased', 'type': 'text'}, 'fill-mask': {'impl': FillMaskPipeline, 'class': (ORTModelForMaskedLM,), 'default': 'bert-base-cased', 'type': 'text'}, 'image-classification': {'impl': ImageClassificationPipeline, 'class': (ORTModelForImageClassification,), 'default': 'google/vit-base-patch16-224', 'type': 'image'}, 'image-segmentation': {'impl': ImageSegmentationPipeline, 'class': (ORTModelForSemanticSegmentation,) if is_onnxruntime_available() else (), 'default': 'nvidia/segformer-b0-finetuned-ade-512-512', 'type': 'image'}, 'question-answering': {'impl': QuestionAnsweringPipeline, 'class': (ORTModelForQuestionAnswering,), 'default': 'distilbert-base-cased-distilled-squad', 'type': 'text'}, 'text-classification': {'impl': TextClassificationPipeline, 'class': (ORTModelForSequenceClassification,), 'default': 'distilbert-base-uncased-finetuned-sst-2-english', 'type': 'text'}, 'text-generation': {'impl': TextGenerationPipeline, 'class': (ORTModelForCausalLM,), 'default': 'distilgpt2', 'type': 'text'}, 'token-classification': {'impl': TokenClassificationPipeline, 'class': (ORTModelForTokenClassification,), 'default': 'dbmdz/bert-large-cased-finetuned-conll03-english', 'type': 'text'}, 'zero-shot-classification': {'impl': ZeroShotClassificationPipeline, 'class': (ORTModelForSequenceClassification,), 'default': 'facebook/bart-large-mnli', 'type': 'text'}, 'summarization': {'impl': SummarizationPipeline, 'class': (ORTModelForSeq2SeqLM,), 'default': 't5-base', 'type': 'text'}, 'translation': {'impl': TranslationPipeline, 'class': (ORTModelForSeq2SeqLM,), 'default': 't5-small', 'type': 'text'}, 'text2text-generation': {'impl': Text2TextGenerationPipeline, 'class': (ORTModelForSeq2SeqLM,), 'default': 't5-small', 'type': 'text'}, 'automatic-speech-recognition': {'impl': AutomaticSpeechRecognitionPipeline, 'class': (ORTModelForSpeechSeq2Seq,), 'default': 'openai/whisper-tiny.en', 'type': 'multimodal'}, 'image-to-text': {'impl': ImageToTextPipeline, 'class': (ORTModelForVision2Seq,), 'default': 'nlpconnect/vit-gpt2-image-captioning', 'type': 'multimodal'}, 'audio-classification': {'impl': AudioClassificationPipeline, 'class': (ORTModelForAudioClassification,), 'default': 'superb/hubert-base-superb-ks', 'type': 'audio'}} else: ORT_SUPPORTED_TASKS = {} def load_bettertransformer(model, targeted_task, load_tokenizer=None, tokenizer=None, feature_extractor=None, load_feature_extractor=None, SUPPORTED_TASKS=None, subfolder: str='', token: Optional[Union[bool, str]]=None, revision: str='main', model_kwargs: Optional[Dict[str, Any]]=None, config: AutoConfig=None, hub_kwargs: Optional[Dict]=None, **kwargs): if 
model_kwargs is None: if check_if_transformers_greater('4.36.0'): model_kwargs = {'attn_implementation': 'eager'} else: model_kwargs = {} if model is None: model_id = SUPPORTED_TASKS[targeted_task]['default'] elif isinstance(model, str): model_id = model else: model_id = None model_classes = {'pt': SUPPORTED_TASKS[targeted_task]['pt']} (framework, model) = infer_framework_load_model(model, model_classes=model_classes, config=config, framework='pt', task=targeted_task, **hub_kwargs, **model_kwargs) if framework == 'tf': raise NotImplementedError('BetterTransformer is PyTorch-specific. It will not work with the provided TensorFlow model.') model = BetterTransformer.transform(model, **kwargs) return (model, model_id, tokenizer, feature_extractor) def load_ort_pipeline(model, targeted_task, load_tokenizer, tokenizer, feature_extractor, load_feature_extractor, SUPPORTED_TASKS, subfolder: str='', token: Optional[Union[bool, str]]=None, revision: str='main', model_kwargs: Optional[Dict[str, Any]]=None, config: AutoConfig=None, **kwargs): if model_kwargs is None: model_kwargs = {} if model is None: model_id = SUPPORTED_TASKS[targeted_task]['default'] model = SUPPORTED_TASKS[targeted_task]['class'][0].from_pretrained(model_id, export=True) elif isinstance(model, str): from ..onnxruntime.modeling_seq2seq import ENCODER_ONNX_FILE_PATTERN, ORTModelForConditionalGeneration model_id = model ort_model_class = SUPPORTED_TASKS[targeted_task]['class'][0] if issubclass(ort_model_class, ORTModelForConditionalGeneration): pattern = ENCODER_ONNX_FILE_PATTERN else: pattern = '.+?.onnx' onnx_files = find_files_matching_pattern(model, pattern, glob_pattern='**/*.onnx', subfolder=subfolder, token=token, revision=revision) export = len(onnx_files) == 0 model = ort_model_class.from_pretrained(model, export=export, **model_kwargs) elif isinstance(model, ORTModel): if tokenizer is None and load_tokenizer: for preprocessor in model.preprocessors: if isinstance(preprocessor, (PreTrainedTokenizer, PreTrainedTokenizerFast)): tokenizer = preprocessor break if tokenizer is None: raise ValueError('Could not automatically find a tokenizer for the ORTModel, you must pass a tokenizer explicitly') if feature_extractor is None and load_feature_extractor: for preprocessor in model.preprocessors: if isinstance(preprocessor, SequenceFeatureExtractor): feature_extractor = preprocessor break if feature_extractor is None: raise ValueError('Could not automatically find a feature extractor for the ORTModel, you must pass a feature_extractor explicitly') model_id = None else: raise ValueError(f'Model {model} is not supported. Please provide a valid model either as a string or an ORTModel.\n You can also provide no model, in which case a default one will be used') return (model, model_id, tokenizer, feature_extractor) MAPPING_LOADING_FUNC = {'ort': load_ort_pipeline, 'bettertransformer': load_bettertransformer} def pipeline(task: str=None, model: Optional[Any]=None, tokenizer: Optional[Union[str, PreTrainedTokenizer]]=None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]]=None, use_fast: bool=True, token: Optional[Union[str, bool]]=None, accelerator: Optional[str]='ort', revision: Optional[str]=None, trust_remote_code: Optional[bool]=None, *model_kwargs, **kwargs) -> Pipeline: targeted_task = 'translation' if task.startswith('translation') else task if accelerator == 'ort': if targeted_task not in list(ORT_SUPPORTED_TASKS.keys()): raise ValueError(f'Task {targeted_task} is not supported for the ONNX Runtime pipeline. 
Supported tasks are {list(ORT_SUPPORTED_TASKS.keys())}') if accelerator not in MAPPING_LOADING_FUNC: raise ValueError(f'Accelerator {accelerator} is not supported. Supported accelerators are "ort" and "bettertransformer".') hub_kwargs = {'revision': revision, 'token': token, 'trust_remote_code': trust_remote_code, '_commit_hash': None} config = kwargs.get('config', None) if config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **kwargs) hub_kwargs['_commit_hash'] = config._commit_hash supported_tasks = ORT_SUPPORTED_TASKS if accelerator == 'ort' else TRANSFORMERS_SUPPORTED_TASKS no_feature_extractor_tasks = set() no_tokenizer_tasks = set() for (_task, values) in supported_tasks.items(): if values['type'] == 'text': no_feature_extractor_tasks.add(_task) elif values['type'] in {'image', 'video'}: no_tokenizer_tasks.add(_task) elif values['type'] in {'audio'}: no_tokenizer_tasks.add(_task) elif values['type'] not in ['multimodal', 'audio', 'video']: raise ValueError(f"SUPPORTED_TASK {_task} contains invalid type {values['type']}") if targeted_task in no_tokenizer_tasks: load_tokenizer = False else: load_tokenizer = True if targeted_task in no_feature_extractor_tasks: load_feature_extractor = False else: load_feature_extractor = True (model, model_id, tokenizer, feature_extractor) = MAPPING_LOADING_FUNC[accelerator](model, targeted_task, load_tokenizer, tokenizer, feature_extractor, load_feature_extractor, *model_kwargs, SUPPORTED_TASKS=supported_tasks, config=config, hub_kwargs=hub_kwargs, token=token, **kwargs) if tokenizer is None and load_tokenizer: tokenizer = get_preprocessor(model_id) if feature_extractor is None and load_feature_extractor: feature_extractor = get_preprocessor(model_id) return transformers_pipeline(task, model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, use_fast=use_fast, **kwargs) # File: optimum-main/optimum/quantization_base.py import logging from abc import ABC, abstractmethod from pathlib import Path from typing import Optional, Union logger = logging.getLogger(__name__) class OptimumQuantizer(ABC): @classmethod def from_pretrained(cls, model_or_path: Union[str, Path], file_name: Optional[str]=None): raise NotImplementedError('Overwrite this method in subclass to define how to load your model from pretrained for quantization') @abstractmethod def quantize(self, save_dir: Union[str, Path], file_prefix: Optional[str]=None, **kwargs): raise NotImplementedError('Overwrite this method in subclass to define how to quantize your model for quantization') # File: optimum-main/optimum/runs_base.py import os import subprocess from contextlib import contextmanager from time import perf_counter_ns from typing import Set import numpy as np import optuna import torch import transformers from datasets import Dataset from tqdm import trange from . 
import version as optimum_version from .utils.preprocessing import ImageClassificationProcessing, QuestionAnsweringProcessing, TextClassificationProcessing, TokenClassificationProcessing from .utils.runs import RunConfig, cpu_info_command os.environ['TOKENIZERS_PARALLELISM'] = 'false' def get_autoclass_name(task): if task in ['text-classification', 'audio-classification']: autoclass_name = 'sequence-classification' else: autoclass_name = task return autoclass_name class Calibrator: def __init__(self, calibration_dataset: Dataset, quantizer, model_path, qconfig, calibration_params, node_exclusion): self.calibration_dataset = calibration_dataset self.quantizer = quantizer self.model_path = model_path self.qconfig = qconfig self.calibration_params = calibration_params self.node_exclusion = node_exclusion def fit(self): raise NotImplementedError() class Run: def __init__(self, run_config: dict): RunConfig(**run_config) self.task = run_config['task'] if run_config['quantization_approach'] == 'static': self.static_quantization = True else: self.static_quantization = False search_space = {'batch_size': run_config['batch_sizes'], 'input_length': run_config['input_lengths']} self.study = optuna.create_study(directions=['maximize', 'minimize'], sampler=optuna.samplers.GridSampler(search_space)) cpu_info = subprocess.check_output([cpu_info_command()], shell=True).decode('utf-8') optimum_hash = None if 'dev' in optimum_version.__version__: optimum_hash = subprocess.check_output("git ls-remote https://github.com/huggingface/optimum.git HEAD | awk '{ print $1}'", shell=True) optimum_hash = optimum_hash.decode('utf-8').strip('\n') self.return_body = {'model_name_or_path': run_config['model_name_or_path'], 'task': self.task, 'task_args': run_config['task_args'], 'dataset': run_config['dataset'], 'quantization_approach': run_config['quantization_approach'], 'operators_to_quantize': run_config['operators_to_quantize'], 'node_exclusion': run_config['node_exclusion'], 'aware_training': run_config['aware_training'], 'per_channel': run_config['per_channel'], 'calibration': run_config['calibration'], 'framework': run_config['framework'], 'framework_args': run_config['framework_args'], 'hardware': cpu_info, 'versions': {'transformers': transformers.__version__, 'optimum': optimum_version.__version__, 'optimum_hash': optimum_hash}, 'evaluation': {'time': [], 'others': {'baseline': {}, 'optimized': {}}}, 'max_eval_samples': run_config['max_eval_samples'], 'time_benchmark_args': run_config['time_benchmark_args']} def launch(self): try: self.study.optimize(self._launch_time) self.launch_eval() finally: self.finalize() print('Finished run.') return self.return_body def _launch_time(self, trial): raise NotImplementedError() def launch_eval(self): raise NotImplementedError() def load_datasets(self): datasets_dict = self.task_processor.load_datasets() self._eval_dataset = datasets_dict['eval'] if self.static_quantization: self._calibration_dataset = datasets_dict['calibration'] def get_calibration_dataset(self): if not hasattr(self, '_calibration_dataset'): raise KeyError('No calibration dataset defined for this run.') return self._calibration_dataset def get_eval_dataset(self): if not hasattr(self, '_eval_dataset'): raise KeyError('No evaluation dataset defined for this run.') return self._eval_dataset def finalize(self): raise NotImplementedError() SEC_TO_NS_SCALE = 1000000000 NS_TO_MS_SCALE = 1000000.0 def ns_to_ms(ns_time): return ns_time / NS_TO_MS_SCALE class TimeBenchmark: def __init__(self, model, batch_size: 
int, input_length: int, model_input_names: Set[str], warmup_runs: int, duration: float): self.batch_size = batch_size self.input_length = input_length self.model = model self.warmup_runs = warmup_runs self.benchmark_duration = duration self.latencies = [] self.throughput = float('-inf') self.model_input_names = model_input_names @property def num_runs(self) -> int: return len(self.latencies) @contextmanager def track(self): start = perf_counter_ns() yield end = perf_counter_ns() self.latencies.append(end - start) print(f'Tracked function took: {end - start}ns ({(end - start) / 1000000.0:.3f}ms)') def finalize(self, duration_ns: int): self.throughput = round(len(self.latencies) / duration_ns * SEC_TO_NS_SCALE, 2) def to_dict(self): benchmarks_stats = {'nb_forwards': len(self.latencies), 'throughput': self.throughput, 'latency_mean': ns_to_ms(np.mean(self.latencies)), 'latency_std': ns_to_ms(np.std(self.latencies)), 'latency_50': ns_to_ms(np.quantile(self.latencies, 0.5)), 'latency_90': ns_to_ms(np.quantile(self.latencies, 0.9)), 'latency_95': ns_to_ms(np.quantile(self.latencies, 0.95)), 'latency_99': ns_to_ms(np.quantile(self.latencies, 0.99)), 'latency_999': ns_to_ms(np.quantile(self.latencies, 0.999))} return benchmarks_stats def execute(self): inputs = {} checked_inputs = {'input_ids', 'attention_mask', 'token_type_ids', 'pixel_values'} if 'input_ids' in self.model_input_names: inputs['input_ids'] = torch.randint(high=1000, size=(self.batch_size, self.input_length)) if 'attention_mask' in self.model_input_names: inputs['attention_mask'] = torch.ones(self.batch_size, self.input_length, dtype=torch.int64) if 'token_type_ids' in self.model_input_names: inputs['token_type_ids'] = torch.ones(self.batch_size, self.input_length, dtype=torch.int64) if 'pixel_values' in self.model_input_names: inputs['pixel_values'] = torch.rand(self.batch_size, 3, self.model.config.image_size, self.model.config.image_size, dtype=torch.float32) if np.any([k not in checked_inputs for k in self.model_input_names]): raise NotImplementedError(f'At least an input in {self.model_input_names} has no dummy generation for time benchmark.') for _ in trange(self.warmup_runs, desc='Warming up'): self.model.forward(**inputs) if self.benchmark_duration != 0: benchmark_duration_ns = self.benchmark_duration * SEC_TO_NS_SCALE print(f'Running time tracking in {self.benchmark_duration:.1f}s.') while sum(self.latencies) < benchmark_duration_ns: with self.track(): self.model.forward(**inputs) self.finalize(benchmark_duration_ns) return self.to_dict() else: benchmarks_stats = {'nb_forwards': 0, 'throughput': -1, 'latency_mean': -1} return benchmarks_stats task_processing_map = {'text-classification': TextClassificationProcessing, 'token-classification': TokenClassificationProcessing, 'question-answering': QuestionAnsweringProcessing, 'image-classification': ImageClassificationProcessing} # File: optimum-main/optimum/subpackages.py import importlib import logging import sys if sys.version_info >= (3, 8): from importlib import metadata as importlib_metadata else: import importlib_metadata from importlib.util import find_spec, module_from_spec from .utils import is_onnxruntime_available logger = logging.getLogger(__name__) def load_namespace_modules(namespace: str, module: str): for dist in importlib_metadata.distributions(): dist_name = dist.metadata['Name'] if not dist_name.startswith(f'{namespace}-'): continue package_import_name = dist_name.replace('-', '.') module_import_name = f'{package_import_name}.{module}' if module_import_name 
in sys.modules: continue backend_spec = find_spec(module_import_name) if backend_spec is None: continue try: imported_module = module_from_spec(backend_spec) sys.modules[module_import_name] = imported_module backend_spec.loader.exec_module(imported_module) logger.debug(f'Successfully loaded {module_import_name}') except Exception as e: logger.error(f'An exception occurred while loading {module_import_name}: {e}.') def load_subpackages(): SUBPACKAGE_LOADER = 'subpackage' load_namespace_modules('optimum', SUBPACKAGE_LOADER) loader_name = '.' + SUBPACKAGE_LOADER if is_onnxruntime_available(): importlib.import_module(loader_name, package='optimum.onnxruntime')
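# Usage sketch (illustrative only, not part of optimum/subpackages.py): how the loader above is meant to be driven.
# `load_subpackages()` scans installed distributions whose name starts with `optimum-` and imports their
# `optimum.<partner>.subpackage` module when it exists; the `optimum-foo` distribution mentioned in the
# comments below is a hypothetical example, not a real package.
import logging

from optimum.subpackages import load_namespace_modules, load_subpackages

logging.basicConfig(level=logging.DEBUG)  # surfaces the 'Successfully loaded ...' debug messages emitted above

# Import every `optimum.<partner>.subpackage` module exposed by installed `optimum-*` distributions,
# then the ONNX Runtime subpackage loader when onnxruntime is available.
load_subpackages()

# The same discovery mechanism can target another module name, e.g. a hypothetical `register` module
# that an `optimum-foo` distribution would expose as `optimum.foo.register`; with no matching
# distributions installed, the call is simply a no-op.
load_namespace_modules('optimum', 'register')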