zhangtao-whu committed on
Commit 1ea9622 · verified · 1 Parent(s): b0c14ef

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1,27 @@
{
  "</box>": 32019,
  "</img>": 32012,
  "</p>": 32022,
  "</quad>": 32015,
  "</ref>": 32017,
  "</vp>": 32024,
  "<IMG_CONTEXT>": 32013,
  "<box>": 32018,
  "<img>": 32011,
  "<p>": 32021,
  "<quad>": 32014,
  "<ref>": 32016,
  "<vp>": 32023,
  "<|assistant|>": 32001,
  "<|endoftext|>": 32000,
  "<|end|>": 32007,
  "<|placeholder1|>": 32002,
  "<|placeholder2|>": 32003,
  "<|placeholder3|>": 32004,
  "<|placeholder4|>": 32005,
  "<|placeholder5|>": 32008,
  "<|placeholder6|>": 32009,
  "<|system|>": 32006,
  "<|user|>": 32010,
  "[SEG]": 32020
}
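
These entries extend the Phi-3 tokenizer with the multimodal special tokens used by Sa2VA; the ids run from 32000 (`<|endoftext|>`) up to 32024 (`</vp>`), which matches the `vocab_size` of 32025 in the `llm_config` of `config.json` below. A minimal lookup sketch, assuming the tokenizer is loaded from a local copy of this repository (the path is illustrative):

```python
from transformers import AutoTokenizer

# Illustrative local path to this repo folder.
tokenizer = AutoTokenizer.from_pretrained("./Sa2VA-4B", trust_remote_code=True)

# Ids should match added_tokens.json.
print(tokenizer.convert_tokens_to_ids("[SEG]"))          # expected: 32020
print(tokenizer.convert_tokens_to_ids("<IMG_CONTEXT>"))  # expected: 32013
```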
config.json ADDED
@@ -0,0 +1,302 @@
{
  "_commit_hash": null,
  "architectures": [
    "Sa2VAChatModel"
  ],
  "auto_map": {
    "AutoConfig": "configuration_sa2va_chat.Sa2VAChatConfig",
    "AutoModel": "modeling_sa2va_chat.Sa2VAChatModel",
    "AutoModelForCausalLM": "modeling_sa2va_chat.Sa2VAChatModel"
  },
  "downsample_ratio": 0.5,
  "dynamic_image_size": true,
  "force_image_size": 448,
  "hidden_size": 3072,
  "llm_config": {
    "_name_or_path": "microsoft/Phi-3-mini-128k-instruct",
    "add_cross_attention": false,
    "architectures": [
      "Phi3ForCausalLM"
    ],
    "attention_dropout": 0.0,
    "auto_map": {
      "AutoConfig": "configuration_phi3.Phi3Config",
      "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM"
    },
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": 1,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "embd_pdrop": 0.0,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 32000,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "silu",
    "hidden_size": 3072,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_range": 0.02,
    "intermediate_size": 8192,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 131072,
    "min_length": 0,
    "model_type": "phi3",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 32,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 32,
    "num_key_value_heads": 32,
    "num_return_sequences": 1,
    "original_max_position_embeddings": 4096,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 32000,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "resid_pdrop": 0.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "rms_norm_eps": 1e-05,
    "rope_scaling": {
      "long_factor": [
        1.0299999713897705,
        1.0499999523162842,
        1.0499999523162842,
        1.0799999237060547,
        1.2299998998641968,
        1.2299998998641968,
        1.2999999523162842,
        1.4499999284744263,
        1.5999999046325684,
        1.6499998569488525,
        1.8999998569488525,
        2.859999895095825,
        3.68999981880188,
        5.419999599456787,
        5.489999771118164,
        5.489999771118164,
        9.09000015258789,
        11.579999923706055,
        15.65999984741211,
        15.769999504089355,
        15.789999961853027,
        18.360000610351562,
        21.989999771118164,
        23.079999923706055,
        30.009998321533203,
        32.35000228881836,
        32.590003967285156,
        35.56000518798828,
        39.95000457763672,
        53.840003967285156,
        56.20000457763672,
        57.95000457763672,
        59.29000473022461,
        59.77000427246094,
        59.920005798339844,
        61.190006256103516,
        61.96000671386719,
        62.50000762939453,
        63.3700065612793,
        63.48000717163086,
        63.48000717163086,
        63.66000747680664,
        63.850006103515625,
        64.08000946044922,
        64.760009765625,
        64.80001068115234,
        64.81001281738281,
        64.81001281738281
      ],
      "short_factor": [
        1.05,
        1.05,
        1.05,
        1.1,
        1.1,
        1.1500000000000001,
        1.2000000000000002,
        1.2500000000000002,
        1.3000000000000003,
        1.3500000000000003,
        1.5000000000000004,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.000000000000001,
        2.0500000000000007,
        2.0500000000000007,
        2.0500000000000007,
        2.1000000000000005,
        2.1000000000000005,
        2.1000000000000005,
        2.1500000000000004,
        2.1500000000000004,
        2.3499999999999996,
        2.549999999999999,
        2.5999999999999988,
        2.5999999999999988,
        2.7499999999999982,
        2.849999999999998,
        2.849999999999998,
        2.9499999999999975
      ],
      "type": "su"
    },
    "rope_theta": 10000.0,
    "sep_token_id": null,
    "sliding_window": 262144,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": false,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.44.0",
    "typical_p": 1.0,
    "use_bfloat16": true,
    "use_cache": true,
    "vocab_size": 32025
  },
  "max_dynamic_patch": 12,
  "min_dynamic_patch": 1,
  "model_type": "sa2va_chat",
  "pad2square": false,
  "ps_version": "v2",
  "select_layer": -1,
  "template": "phi3-chat",
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": null,
  "use_backbone_lora": 0,
  "use_llm_lora": 0,
  "use_thumbnail": true,
  "vision_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": [
      "InternVisionModel"
    ],
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "drop_path_rate": 0.0,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_size": 1024,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 448,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-06,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "intern_vit_6b",
    "no_repeat_ngram_size": 0,
    "norm_type": "layer_norm",
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_channels": 3,
    "num_hidden_layers": 24,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "qk_normalization": false,
    "qkv_bias": true,
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.44.0",
    "typical_p": 1.0,
    "use_bfloat16": true,
    "use_flash_attn": true
  }
}
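
The `auto_map` entries above route `AutoConfig`/`AutoModel` to the custom classes shipped in this repository, so loading requires `trust_remote_code=True`. A minimal loading sketch, assuming a local copy of this repo and a CUDA device (the path and dtype choice are illustrative, dtype matching the "bfloat16" entry above):

```python
import torch
from transformers import AutoModel, AutoTokenizer

path = "./Sa2VA-4B"  # illustrative local path to this repo
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16"
    trust_remote_code=True,      # auto_map points at files in this repo
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
```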
configuration_intern_vit.py ADDED
@@ -0,0 +1,120 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_6b'

    def __init__(
        self,
        num_channels=3,
        patch_size=14,
        image_size=224,
        qkv_bias=False,
        hidden_size=3200,
        num_attention_heads=25,
        intermediate_size=12800,
        qk_normalization=True,
        num_hidden_layers=48,
        use_flash_attn=True,
        hidden_act='gelu',
        norm_type='rms_norm',
        layer_norm_eps=1e-6,
        dropout=0.0,
        drop_path_rate=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
configuration_internlm2.py ADDED
@@ -0,0 +1,150 @@
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on transformers/src/transformers/models/llama/configuration_llama.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" InternLM2 model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


# Modified from transformers.model.llama.configuration_llama.LlamaConfig
class InternLM2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
    an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`InternLM2Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
    Example:

    """
    model_type = 'internlm2'
    _auto_class = 'AutoConfig'

    def __init__(  # pylint: disable=W0102
        self,
        vocab_size=103168,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act='silu',
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        bias=True,
        rope_theta=10000,
        rope_scaling=None,
        attn_implementation='eager',
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.bias = bias

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        self.attn_implementation = attn_implementation
        if self.attn_implementation is None:
            self.attn_implementation = 'eager'
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}'
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
configuration_phi3.py ADDED
@@ -0,0 +1,211 @@
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Phi-3 model configuration"""


from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/Phi-3-mini-4k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json',
    'microsoft/Phi-3-mini-128k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json',
}


class Phi3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the
    [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32064):
            Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Phi3Model`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            Dropout probability for mlp outputs.
        embd_pdrop (`int`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        original_max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model was trained with. This is used to determine the size of the
            original RoPE embeddings when using long scaling.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used for the RMSNorm.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
            contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or `yarn` and
            the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
            divided by the number of attention heads divided by 2.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 32000):
            The id of the "end-of-sequence" token.
        pad_token_id (`int`, *optional*, defaults to 32000):
            The id of the padding token.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, no sliding window is applied.

    Example:

    ```python
    >>> from transformers import Phi3Model, Phi3Config

    >>> # Initializing a Phi-3 style configuration
    >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

    >>> # Initializing a model from the configuration
    >>> model = Phi3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = 'phi3'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=32064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act='silu',
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        bos_token_id=1,
        eos_token_id=32000,
        pad_token_id=32000,
        sliding_window=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.sliding_window = sliding_window

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
            raise ValueError(
                '`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, '
                f'got {self.rope_scaling}'
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_short_factor = self.rope_scaling.get('short_factor', None)
        rope_scaling_long_factor = self.rope_scaling.get('long_factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['su', 'yarn']:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}")
        if not (
            isinstance(rope_scaling_short_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
            )
        if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
            )
        if not (
            isinstance(rope_scaling_long_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
            )
        if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
            )
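
The length check in `_rope_scaling_validation` ties this file back to `config.json` above: with `hidden_size=3072` and `num_attention_heads=32`, both factor lists must have `3072 // 32 // 2 = 48` entries, which is exactly the length of the `short_factor` and `long_factor` arrays in the committed config. A tiny arithmetic check of that constraint:

```python
# Length constraint enforced by Phi3Config._rope_scaling_validation for this checkpoint.
hidden_size, num_attention_heads = 3072, 32
assert hidden_size // num_attention_heads // 2 == 48  # entries expected per factor list
```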
configuration_sa2va_chat.py ADDED
@@ -0,0 +1,107 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import copy

from .configuration_internlm2 import InternLM2Config
from .configuration_phi3 import Phi3Config
from transformers import AutoConfig, LlamaConfig, Qwen2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

logger = logging.get_logger(__name__)


class Sa2VAChatConfig(PretrainedConfig):
    model_type = 'sa2va_chat'
    is_composition = True

    def __init__(
        self,
        vision_config=None,
        llm_config=None,
        use_backbone_lora=0,
        use_llm_lora=0,
        pad2square=False,
        select_layer=-1,
        force_image_size=None,
        downsample_ratio=0.5,
        template=None,
        dynamic_image_size=False,
        use_thumbnail=False,
        ps_version='v1',
        min_dynamic_patch=1,
        max_dynamic_patch=6,
        **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {}
            logger.info('llm_config is None. Initializing the llm config with default values (`LlamaConfig`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config['architectures'][0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config['architectures'][0] == 'InternLM2ForCausalLM':
            self.llm_config = InternLM2Config(**llm_config)
        elif llm_config['architectures'][0] == 'Phi3ForCausalLM':
            self.llm_config = Phi3Config(**llm_config)
        elif llm_config['architectures'][0] == 'Qwen2ForCausalLM':
            self.llm_config = Qwen2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.pad2square = pad2square
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch

        self.hidden_size = self.llm_config.hidden_size
        self.tie_word_embeddings = False

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['pad2square'] = self.pad2square
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
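
`Sa2VAChatConfig` dispatches the nested `llm_config` on `architectures[0]`; with this repository's `config.json` that entry is `"Phi3ForCausalLM"`, so it builds a `Phi3Config`. A minimal sketch of reading the composite config, assuming a local copy of this repo (path illustrative):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./Sa2VA-4B", trust_remote_code=True)
# For this checkpoint, llm_config["architectures"][0] == "Phi3ForCausalLM",
# so cfg.llm_config is a Phi3Config and cfg.hidden_size == 3072.
print(type(cfg.llm_config).__name__, cfg.hidden_size)
```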
flash_attention.py ADDED
@@ -0,0 +1,76 @@
# https://github.com/Dao-AILab/flash-attention/blob/v0.2.8/flash_attn/flash_attention.py
import torch
import torch.nn as nn
from einops import rearrange

try:  # v1
    from flash_attn.flash_attn_interface import \
        flash_attn_unpadded_qkvpacked_func
except:  # v2
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func

from flash_attn.bert_padding import pad_input, unpad_input


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
            (default: 1/sqrt(d_keys) where d_keys is computed at runtime)
        attention_dropout: The dropout rate to apply to the attention
            (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
                if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_unpadded_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_unpadded_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            assert max_s is not None
            output = flash_attn_unpadded_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None
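
The wrapper above expects packed qkv of shape (B, S, 3, H, D) in fp16/bf16 on a CUDA device and returns `(output, None)`. A minimal usage sketch, assuming flash-attn is installed and a GPU is available; the shapes are illustrative (they mirror the 448px / patch-14 vision encoder: 1024 patches plus a CLS token):

```python
import torch

# Illustrative shapes: batch 2, sequence 1025, 16 heads of dim 64.
qkv = torch.randn(2, 1025, 3, 16, 64, dtype=torch.bfloat16, device="cuda")
attn = FlashAttention(attention_dropout=0.0)
out, _ = attn(qkv, causal=False)  # out: (2, 1025, 16, 64)
```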
generation_config.json ADDED
@@ -0,0 +1,4 @@
{
  "_from_model_config": true,
  "transformers_version": "4.44.0"
}
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d6de6916b83836433e6c202083245084c89c097688fe2f5ab9223876b2f12bf
size 4977991320
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:db334fdf07db1ba9267bb0ad3de15e705da5dcf3acbb7a8270e599213042e971
size 4983112160
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f3c6dcf68370573955119bbbf4c26d6352a65c7cbc57432d84fb03854a03425e
size 4983112168
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:489ae71bc7b6e88364c739c9887685f422f22f8e0dc2817519280ad35a9bcb38
size 1974134160
model.safetensors.index.json ADDED
The diff for this file is too large to render.
 
modeling_intern_vit.py ADDED
@@ -0,0 +1,364 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from timm.models.layers import DropPath
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

try:
    from .flash_attention import FlashAttention
    has_flash_attn = True
except:
    print('FlashAttention is not installed.')
    has_flash_attn = False

logger = logging.get_logger(__name__)


class InternRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # using the normal InternRMSNorm
    pass
except Exception:
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass


NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}


class InternVisionEmbeddings(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(
            torch.randn(1, 1, self.embed_dim),
        )

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def _get_pos_embed(self, pos_embed, H, W):
        target_dtype = pos_embed.dtype
        pos_embed = pos_embed.float().reshape(
            1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
        return pos_embed

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
        batch_size, _, height, width = patch_embeds.shape
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
        ], dim=1)
        embeddings = embeddings + position_embedding.to(target_dtype)
        return embeddings


class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        if config.use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x


class InternMLP(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InternVisionEncoderLayer(nn.Module):
    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)

        return hidden_states


class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternEncoderLayer`].

    Args:
        config (`InternConfig`):
            The corresponding vision configuration for the `InternEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = True

    def forward(
        self,
        inputs_embeds,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )


class InternVisionModel(PreTrainedModel):
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
modeling_internlm2.py ADDED
@@ -0,0 +1,1429 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
+ import math
18
+ import queue
19
+ import threading
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from einops import rearrange
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
31
+ CausalLMOutputWithPast,
32
+ SequenceClassifierOutputWithPast)
33
+ from transformers.modeling_utils import PreTrainedModel
34
+ from transformers.utils import (add_start_docstrings,
35
+ add_start_docstrings_to_model_forward, logging,
36
+ replace_return_docstrings)
37
+
38
+ try:
39
+ from transformers.generation.streamers import BaseStreamer
40
+ except: # noqa # pylint: disable=bare-except
41
+ BaseStreamer = None
42
+
43
+ from .configuration_internlm2 import InternLM2Config
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = 'InternLM2Config'
48
+
49
+ flash_attn_func, flash_attn_varlen_func = None, None
50
+ pad_input, index_first_axis, unpad_input = None, None, None
51
+ try:
52
+ from flash_attn import flash_attn_func as _flash_attn_func
53
+ from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
54
+ from flash_attn.bert_padding import index_first_axis as _index_first_axis
55
+ from flash_attn.bert_padding import pad_input as _pad_input
56
+ from flash_attn.bert_padding import unpad_input as _unpad_input
57
+
58
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
59
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
60
+ has_flash_attn = True
61
+ except:
62
+ has_flash_attn = False
63
+
64
+
65
+ def _import_flash_attn():
66
+ global flash_attn_func, flash_attn_varlen_func
67
+ global pad_input, index_first_axis, unpad_input
68
+ try:
69
+ from flash_attn import flash_attn_func as _flash_attn_func
70
+ from flash_attn import \
71
+ flash_attn_varlen_func as _flash_attn_varlen_func
72
+ from flash_attn.bert_padding import \
73
+ index_first_axis as _index_first_axis
74
+ from flash_attn.bert_padding import pad_input as _pad_input
75
+ from flash_attn.bert_padding import unpad_input as _unpad_input
76
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
77
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
78
+ except ImportError:
79
+ raise ImportError('flash_attn is not installed.')
80
+
81
+
82
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
83
+ def _get_unpad_data(attention_mask):
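+ # Note (added comment): returns the flattened indices of non-padding tokens, the cumulative sequence lengths (cu_seqlens) and the longest sequence in the batch, as required by the varlen flash-attention kernels.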
84
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
85
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
86
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
87
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
88
+ return (
89
+ indices,
90
+ cu_seqlens,
91
+ max_seqlen_in_batch,
92
+ )
93
+
94
+
95
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
96
+ def _make_causal_mask(
97
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
98
+ ):
99
+ """
100
+ Make causal mask used for bi-directional self-attention.
101
+ """
102
+ bsz, tgt_len = input_ids_shape
103
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
104
+ mask_cond = torch.arange(mask.size(-1), device=device)
105
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
106
+ mask = mask.to(dtype)
107
+
108
+ if past_key_values_length > 0:
109
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
110
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
111
+
112
+
113
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
114
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
115
+ """
116
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
117
+ """
118
+ bsz, src_len = mask.size()
119
+ tgt_len = tgt_len if tgt_len is not None else src_len
120
+
121
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
122
+
123
+ inverted_mask = 1.0 - expanded_mask
124
+
125
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
126
+
127
+
128
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
129
+ class InternLM2RMSNorm(nn.Module):
130
+ def __init__(self, hidden_size, eps=1e-6):
131
+ """
132
+ InternLM2RMSNorm is equivalent to T5LayerNorm
133
+ """
134
+ super().__init__()
135
+ self.weight = nn.Parameter(torch.ones(hidden_size))
136
+ self.variance_epsilon = eps
137
+
138
+ def forward(self, hidden_states):
139
+ input_dtype = hidden_states.dtype
140
+ hidden_states = hidden_states.to(torch.float32)
141
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
142
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
143
+ return self.weight * hidden_states.to(input_dtype)
144
+
145
+
146
+ try:
147
+ from functools import partial
148
+
149
+ from apex.normalization import FusedRMSNorm
150
+ InternLM2RMSNorm = partial(FusedRMSNorm, eps=1e-6) # noqa
151
+ print('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternLM2RMSNorm')
152
+ except ImportError:
153
+ # using the normal LlamaRMSNorm
154
+ pass
155
+ except Exception:
156
+ print('discovered apex but it failed to load, falling back to InternLM2RMSNorm')
157
+ pass
158
+
159
+
160
+ # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
161
+ class InternLM2RotaryEmbedding(nn.Module):
162
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
163
+ super().__init__()
164
+
165
+ self.dim = dim
166
+ self.max_position_embeddings = max_position_embeddings
167
+ self.base = base
168
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
169
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
170
+
171
+ # Build here to make `torch.jit.trace` work.
172
+ self._set_cos_sin_cache(
173
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
174
+ )
175
+
176
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
177
+ self.max_seq_len_cached = seq_len
178
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
179
+
180
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
181
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
182
+ emb = torch.cat((freqs, freqs), dim=-1)
183
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
184
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
185
+
186
+ def forward(self, x, seq_len=None):
187
+ # x: [bs, num_attention_heads, seq_len, head_size]
188
+ if seq_len > self.max_seq_len_cached:
189
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)
190
+
191
+ return (
192
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
193
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
194
+ )
195
+
196
+
197
+ # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
198
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
199
+ """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
200
+
201
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
202
+ self.scaling_factor = scaling_factor
203
+ super().__init__(dim, max_position_embeddings, base, device)
204
+
205
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
206
+ self.max_seq_len_cached = seq_len
207
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
208
+ t = t / self.scaling_factor
209
+
210
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
211
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
212
+ emb = torch.cat((freqs, freqs), dim=-1)
213
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
214
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
215
+
216
+
217
+ # Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
218
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
219
+ """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
220
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
221
+ """
222
+
223
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
224
+ self.scaling_factor = scaling_factor
225
+ super().__init__(dim, max_position_embeddings, base, device)
226
+
227
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
228
+ self.max_seq_len_cached = seq_len
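+ # Note (added comment): dynamic NTK scaling — when the requested length exceeds the original context window, the rotary base is enlarged so frequencies are interpolated rather than extrapolated.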
229
+
230
+ if seq_len > self.max_position_embeddings:
231
+ base = self.base * (
232
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
233
+ ) ** (self.dim / (self.dim - 2))
234
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
235
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
236
+
237
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
238
+
239
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
240
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
241
+ emb = torch.cat((freqs, freqs), dim=-1)
242
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
243
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
244
+
245
+
246
+ # Copied from transformers.model.llama.modeling_llama.rotate_half
247
+ def rotate_half(x):
248
+ """Rotates half the hidden dims of the input."""
249
+ x1 = x[..., : x.shape[-1] // 2]
250
+ x2 = x[..., x.shape[-1] // 2:]
251
+ return torch.cat((-x2, x1), dim=-1)
252
+
253
+
254
+ # Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
255
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
256
+ """Applies Rotary Position Embedding to the query and key tensors."""
257
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
258
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
259
+ q_embed = (q * cos) + (rotate_half(q) * sin)
260
+ k_embed = (k * cos) + (rotate_half(k) * sin)
261
+ return q_embed, k_embed
262
+
263
+
264
+ class InternLM2MLP(nn.Module):
265
+ def __init__(self, config):
266
+ super().__init__()
267
+ self.config = config
268
+ self.hidden_size = config.hidden_size
269
+ self.intermediate_size = config.intermediate_size
270
+ self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
271
+ self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
272
+ self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
273
+ self.act_fn = ACT2FN[config.hidden_act]
274
+
275
+ def forward(self, x):
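+ # Note (added comment): SwiGLU-style gated MLP — w1 is the gate projection, w3 the up projection and w2 the down projection.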
276
+ down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))
277
+
278
+ return down_proj
279
+
280
+
281
+ # Copied from transformers.model.llama.modeling_llama.repeat_kv
282
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
283
+ """
284
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
285
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
286
+ """
287
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
288
+ if n_rep == 1:
289
+ return hidden_states
290
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
291
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
292
+
293
+
294
+ # Modified from transformers.model.llama.modeling_llama.LlamaAttention
295
+ class InternLM2Attention(nn.Module):
296
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
297
+
298
+ def __init__(self, config: InternLM2Config):
299
+ super().__init__()
300
+ self.config = config
301
+ self.hidden_size = config.hidden_size
302
+ self.num_heads = config.num_attention_heads
303
+ self.head_dim = self.hidden_size // self.num_heads
304
+ self.num_key_value_heads = config.num_key_value_heads
305
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
306
+ self.max_position_embeddings = config.max_position_embeddings
307
+ self.is_causal = True
308
+
309
+ if (self.head_dim * self.num_heads) != self.hidden_size:
310
+ raise ValueError(
311
+ f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
312
+ f' and `num_heads`: {self.num_heads}).'
313
+ )
314
+
315
+ self.wqkv = nn.Linear(
316
+ self.hidden_size,
317
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
318
+ bias=config.bias,
319
+ )
320
+
321
+ self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
322
+ self._init_rope()
323
+
324
+ def _init_rope(self):
325
+ if self.config.rope_scaling is None:
326
+ self.rotary_emb = InternLM2RotaryEmbedding(
327
+ self.head_dim,
328
+ max_position_embeddings=self.max_position_embeddings,
329
+ base=self.config.rope_theta,
330
+ )
331
+ else:
332
+ scaling_type = self.config.rope_scaling['type']
333
+ scaling_factor = self.config.rope_scaling['factor']
334
+ if scaling_type == 'dynamic':
335
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
336
+ self.head_dim,
337
+ max_position_embeddings=self.max_position_embeddings,
338
+ base=self.config.rope_theta,
339
+ scaling_factor=scaling_factor,
340
+ )
341
+ elif scaling_type == 'linear':
342
+ self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
343
+ self.head_dim,
344
+ max_position_embeddings=self.max_position_embeddings,
345
+ base=self.config.rope_theta,
346
+ scaling_factor=scaling_factor,
347
+ )
348
+ else:
349
+ raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
350
+ return self.rotary_emb
351
+
352
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
353
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
354
+
355
+ def forward(
356
+ self,
357
+ hidden_states: torch.Tensor,
358
+ attention_mask: Optional[torch.Tensor] = None,
359
+ position_ids: Optional[torch.LongTensor] = None,
360
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
361
+ output_attentions: bool = False,
362
+ use_cache: bool = False,
363
+ **kwargs,
364
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
365
+ if 'padding_mask' in kwargs:
366
+ warnings.warn(
367
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
368
+ 'Please make sure to use `attention_mask` instead.'
369
+ )
370
+
371
+ bsz, q_len, _ = hidden_states.size()
372
+
373
+ qkv_states = self.wqkv(hidden_states)
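+ # Note (added comment): the fused wqkv projection packs, per key/value head, num_key_value_groups query heads followed by one key and one value head; the rearrange below exposes this grouping so Q, K and V can be sliced apart.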
374
+
375
+ qkv_states = rearrange(
376
+ qkv_states,
377
+ 'b q (h gs d) -> b q h gs d',
378
+ gs=2 + self.num_key_value_groups,
379
+ d=self.head_dim,
380
+ )
381
+
382
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
383
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
384
+ key_states = qkv_states[..., -2, :]
385
+ value_states = qkv_states[..., -1, :]
386
+
387
+ query_states = query_states.transpose(1, 2)
388
+ key_states = key_states.transpose(1, 2)
389
+ value_states = value_states.transpose(1, 2)
390
+
391
+ kv_seq_len = key_states.shape[-2]
392
+ if past_key_value is not None:
393
+ kv_seq_len += past_key_value[0].shape[-2]
394
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
395
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
396
+
397
+ if past_key_value is not None:
398
+ # reuse k, v, self_attention
399
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
400
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
401
+
402
+ past_key_value = (key_states, value_states) if use_cache else None
403
+
404
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
405
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
406
+
407
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
408
+
409
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
410
+ raise ValueError(
411
+ f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
412
+ f' {attn_weights.size()}'
413
+ )
414
+
415
+ if attention_mask is not None:
416
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
417
+ raise ValueError(
418
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
419
+ )
420
+ attn_weights = attn_weights + attention_mask
421
+
422
+ # upcast attention to fp32
423
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
424
+ attn_output = torch.matmul(attn_weights, value_states)
425
+
426
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
427
+ raise ValueError(
428
+ f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
429
+ f' {attn_output.size()}'
430
+ )
431
+
432
+ attn_output = attn_output.transpose(1, 2).contiguous()
433
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
434
+
435
+ attn_output = self.wo(attn_output)
436
+
437
+ if not output_attentions:
438
+ attn_weights = None
439
+
440
+ return attn_output, attn_weights, past_key_value
441
+
442
+
443
+ # Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
444
+ class InternLM2FlashAttention2(InternLM2Attention):
445
+ """
446
+ InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stay
447
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
448
+ flash attention and deal with padding tokens in case the input contains any of them.
449
+ """
450
+
451
+ def forward(
452
+ self,
453
+ hidden_states: torch.Tensor,
454
+ attention_mask: Optional[torch.LongTensor] = None,
455
+ position_ids: Optional[torch.LongTensor] = None,
456
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
457
+ output_attentions: bool = False,
458
+ use_cache: bool = False,
459
+ **kwargs,
460
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
461
+ # InternLM2FlashAttention2 attention does not support output_attentions
462
+ if 'padding_mask' in kwargs:
463
+ warnings.warn(
464
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
465
+ 'Please make sure to use `attention_mask` instead.'
466
+ )
467
+
468
+ # overwrite attention_mask with padding_mask
469
+ attention_mask = kwargs.pop('padding_mask')
470
+
471
+ output_attentions = False
472
+
473
+ bsz, q_len, _ = hidden_states.size()
474
+
475
+ qkv_states = self.wqkv(hidden_states)
476
+
477
+ qkv_states = rearrange(
478
+ qkv_states,
479
+ 'b q (h gs d) -> b q h gs d',
480
+ gs=2 + self.num_key_value_groups,
481
+ d=self.head_dim,
482
+ )
483
+
484
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
485
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
486
+ key_states = qkv_states[..., -2, :]
487
+ value_states = qkv_states[..., -1, :]
488
+
489
+ query_states = query_states.transpose(1, 2)
490
+ key_states = key_states.transpose(1, 2)
491
+ value_states = value_states.transpose(1, 2)
492
+
493
+ kv_seq_len = key_states.shape[-2]
494
+ if past_key_value is not None:
495
+ kv_seq_len += past_key_value[0].shape[-2]
496
+
497
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
498
+
499
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
500
+
501
+ if past_key_value is not None:
502
+ # reuse k, v, self_attention
503
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
504
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
505
+
506
+ past_key_value = (key_states, value_states) if use_cache else None
507
+
508
+ query_states = query_states.transpose(1, 2)
509
+ key_states = key_states.transpose(1, 2)
510
+ value_states = value_states.transpose(1, 2)
511
+
512
+ attn_output = self._flash_attention_forward(
513
+ query_states, key_states, value_states, attention_mask, q_len
514
+ )
515
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
516
+ attn_output = self.wo(attn_output)
517
+
518
+ if not output_attentions:
519
+ attn_weights = None
520
+
521
+ return attn_output, attn_weights, past_key_value
522
+
523
+ def _flash_attention_forward(
524
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
525
+ ):
526
+ """
527
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
528
+ first unpads the input, then computes the attention scores and pads the final attention scores.
529
+
530
+ Args:
531
+ query_states (`torch.Tensor`):
532
+ Input query states to be passed to Flash Attention API
533
+ key_states (`torch.Tensor`):
534
+ Input key states to be passed to Flash Attention API
535
+ value_states (`torch.Tensor`):
536
+ Input value states to be passed to Flash Attention API
537
+ attention_mask (`torch.Tensor`):
538
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
539
+ position of padding tokens and 1 for the position of non-padding tokens.
540
+ dropout (`float`, *optional*):
541
+ Attention dropout
542
+ softmax_scale (`float`, *optional*):
543
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
544
+ """
545
+ # Causal masking is required except when decoding a single query token.
546
+ causal = self.is_causal and query_length != 1
547
+ if attention_mask is not None:
548
+ batch_size = query_states.shape[0]
549
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
550
+ query_states, key_states, value_states, attention_mask, query_length
551
+ )
552
+
553
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
554
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
555
+
556
+ attn_output_unpad = flash_attn_varlen_func(
557
+ query_states,
558
+ key_states,
559
+ value_states,
560
+ cu_seqlens_q=cu_seqlens_q,
561
+ cu_seqlens_k=cu_seqlens_k,
562
+ max_seqlen_q=max_seqlen_in_batch_q,
563
+ max_seqlen_k=max_seqlen_in_batch_k,
564
+ dropout_p=dropout,
565
+ softmax_scale=softmax_scale,
566
+ causal=causal,
567
+ )
568
+
569
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
570
+ else:
571
+ attn_output = flash_attn_func(
572
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
573
+ )
574
+
575
+ return attn_output
576
+
577
+ def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
578
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
579
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
580
+
581
+ key_layer = index_first_axis(
582
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
583
+ )
584
+ value_layer = index_first_axis(
585
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
586
+ )
587
+
588
+ if query_length == kv_seq_len:
589
+ query_layer = index_first_axis(
590
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
591
+ )
592
+ cu_seqlens_q = cu_seqlens_k
593
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
594
+ indices_q = indices_k
595
+ elif query_length == 1:
596
+ max_seqlen_in_batch_q = 1
597
+ cu_seqlens_q = torch.arange(
598
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
599
+ ) # There is a memcpy here, that is very bad.
600
+ indices_q = cu_seqlens_q[:-1]
601
+ query_layer = query_layer.squeeze(1)
602
+ else:
603
+ # The -q_len: slice assumes left padding.
604
+ attention_mask = attention_mask[:, -query_length:]
605
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
606
+
607
+ return (
608
+ query_layer,
609
+ key_layer,
610
+ value_layer,
611
+ indices_q.to(torch.int64),
612
+ (cu_seqlens_q, cu_seqlens_k),
613
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
614
+ )
615
+
616
+
617
+ INTERNLM2_ATTENTION_CLASSES = {
618
+ 'eager': InternLM2Attention,
619
+ 'flash_attention_2': InternLM2FlashAttention2,
620
+ }
621
+
622
+
623
+ # Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
624
+ class InternLM2DecoderLayer(nn.Module):
625
+ def __init__(self, config: InternLM2Config):
626
+ super().__init__()
627
+ self.hidden_size = config.hidden_size
628
+
629
+ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)
630
+
631
+ self.feed_forward = InternLM2MLP(config)
632
+ self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
633
+ self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
634
+
635
+ def forward(
636
+ self,
637
+ hidden_states: torch.Tensor,
638
+ attention_mask: Optional[torch.Tensor] = None,
639
+ position_ids: Optional[torch.LongTensor] = None,
640
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
641
+ output_attentions: Optional[bool] = False,
642
+ use_cache: Optional[bool] = False,
643
+ **kwargs,
644
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
645
+ """
646
+ Args:
647
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
648
+ attention_mask (`torch.FloatTensor`, *optional*):
649
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
650
+ query_sequence_length, key_sequence_length)` if default attention is used.
651
+ output_attentions (`bool`, *optional*):
652
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
653
+ returned tensors for more detail.
654
+ use_cache (`bool`, *optional*):
655
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
656
+ (see `past_key_values`).
657
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
658
+ """
659
+ if 'padding_mask' in kwargs:
660
+ warnings.warn(
661
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
662
+ 'Please make sure to use `attention_mask` instead.'
663
+ )
664
+
665
+ residual = hidden_states
666
+
667
+ hidden_states = self.attention_norm(hidden_states)
668
+
669
+ # Self Attention
670
+ hidden_states, self_attn_weights, present_key_value = self.attention(
671
+ hidden_states=hidden_states,
672
+ attention_mask=attention_mask,
673
+ position_ids=position_ids,
674
+ past_key_value=past_key_value,
675
+ output_attentions=output_attentions,
676
+ use_cache=use_cache,
677
+ **kwargs,
678
+ )
679
+ hidden_states = residual + hidden_states
680
+
681
+ # Fully Connected
682
+ residual = hidden_states
683
+ hidden_states = self.ffn_norm(hidden_states)
684
+ hidden_states = self.feed_forward(hidden_states)
685
+ hidden_states = residual + hidden_states
686
+
687
+ outputs = (hidden_states,)
688
+
689
+ if output_attentions:
690
+ outputs += (self_attn_weights,)
691
+
692
+ if use_cache:
693
+ outputs += (present_key_value,)
694
+
695
+ return outputs
696
+
697
+
698
+ InternLM2_START_DOCSTRING = r"""
699
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
700
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
701
+ etc.)
702
+
703
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
704
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
705
+ and behavior.
706
+
707
+ Parameters:
708
+ config ([`InternLM2Config`]):
709
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
710
+ load the weights associated with the model, only the configuration. Check out the
711
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
712
+ """
713
+
714
+
715
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
716
+ @add_start_docstrings(
717
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
718
+ InternLM2_START_DOCSTRING,
719
+ )
720
+ class InternLM2PreTrainedModel(PreTrainedModel):
721
+ config_class = InternLM2Config
722
+ base_model_prefix = 'model'
723
+ supports_gradient_checkpointing = True
724
+ _no_split_modules = ['InternLM2DecoderLayer']
725
+ _skip_keys_device_placement = 'past_key_values'
726
+ _supports_flash_attn_2 = True
727
+
728
+ def _init_weights(self, module):
729
+ std = self.config.initializer_range
730
+ if isinstance(module, nn.Linear):
731
+ module.weight.data.normal_(mean=0.0, std=std)
732
+ if module.bias is not None:
733
+ module.bias.data.zero_()
734
+ elif isinstance(module, nn.Embedding):
735
+ module.weight.data.normal_(mean=0.0, std=std)
736
+ if module.padding_idx is not None:
737
+ module.weight.data[module.padding_idx].zero_()
738
+
739
+
740
+ InternLM2_INPUTS_DOCSTRING = r"""
741
+ Args:
742
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
743
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
744
+ it.
745
+
746
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
747
+ [`PreTrainedTokenizer.__call__`] for details.
748
+
749
+ [What are input IDs?](../glossary#input-ids)
750
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
751
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
752
+
753
+ - 1 for tokens that are **not masked**,
754
+ - 0 for tokens that are **masked**.
755
+
756
+ [What are attention masks?](../glossary#attention-mask)
757
+
758
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
759
+ [`PreTrainedTokenizer.__call__`] for details.
760
+
761
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
762
+ `past_key_values`).
763
+
764
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
765
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
766
+ information on the default strategy.
767
+
768
+ - 1 indicates the head is **not masked**,
769
+ - 0 indicates the head is **masked**.
770
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
771
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
772
+ config.n_positions - 1]`.
773
+
774
+ [What are position IDs?](../glossary#position-ids)
775
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
776
+ when `config.use_cache=True`):
777
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
778
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
779
+ `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.
780
+
781
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
782
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
783
+
784
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
785
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
786
+ of shape `(batch_size, sequence_length)`.
787
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
788
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
789
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
790
+ model's internal embedding lookup matrix.
791
+ use_cache (`bool`, *optional*):
792
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
793
+ `past_key_values`).
794
+ output_attentions (`bool`, *optional*):
795
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
796
+ tensors for more detail.
797
+ output_hidden_states (`bool`, *optional*):
798
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
799
+ more detail.
800
+ return_dict (`bool`, *optional*):
801
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
802
+ """
803
+
804
+
805
+ # Modified from transformers.model.llama.modeling_llama.LlamaModel
806
+ @add_start_docstrings(
807
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
808
+ InternLM2_START_DOCSTRING,
809
+ )
810
+ class InternLM2Model(InternLM2PreTrainedModel):
811
+ """
812
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]
813
+
814
+ Args:
815
+ config: InternLM2Config
816
+ """
817
+
818
+ _auto_class = 'AutoModel'
819
+
820
+ def __init__(self, config: InternLM2Config):
821
+ super().__init__(config)
822
+ self.padding_idx = config.pad_token_id
823
+ self.vocab_size = config.vocab_size
824
+ self.config = config
825
+ if not has_flash_attn:
826
+ self.config.attn_implementation = 'eager'
827
+ print('Warning: Flash attention is not available, using eager attention instead.')
828
+
829
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
830
+
831
+ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
832
+ self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
833
+
834
+ self.gradient_checkpointing = False
835
+ # Initialize weights and apply final processing
836
+ self.post_init()
837
+
838
+ def get_input_embeddings(self):
839
+ return self.tok_embeddings
840
+
841
+ def set_input_embeddings(self, value):
842
+ self.tok_embeddings = value
843
+
844
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
845
+ # create causal mask
846
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
847
+ combined_attention_mask = None
848
+ if input_shape[-1] > 1:
849
+ combined_attention_mask = _make_causal_mask(
850
+ input_shape,
851
+ inputs_embeds.dtype,
852
+ device=inputs_embeds.device,
853
+ past_key_values_length=past_key_values_length,
854
+ )
855
+
856
+ if attention_mask is not None:
857
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
858
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
859
+ inputs_embeds.device
860
+ )
861
+ combined_attention_mask = (
862
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
863
+ )
864
+
865
+ return combined_attention_mask
866
+
867
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
868
+ def forward(
869
+ self,
870
+ input_ids: torch.LongTensor = None,
871
+ attention_mask: Optional[torch.Tensor] = None,
872
+ position_ids: Optional[torch.LongTensor] = None,
873
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
874
+ inputs_embeds: Optional[torch.FloatTensor] = None,
875
+ use_cache: Optional[bool] = None,
876
+ output_attentions: Optional[bool] = None,
877
+ output_hidden_states: Optional[bool] = None,
878
+ return_dict: Optional[bool] = None,
879
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
880
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
881
+ output_hidden_states = (
882
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
883
+ )
884
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
885
+
886
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
887
+
888
+ if self.config.attn_implementation == 'flash_attention_2':
889
+ _import_flash_attn()
890
+
891
+ # retrieve input_ids and inputs_embeds
892
+ if input_ids is not None and inputs_embeds is not None:
893
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
894
+ elif input_ids is not None:
895
+ batch_size, seq_length = input_ids.shape[:2]
896
+ elif inputs_embeds is not None:
897
+ batch_size, seq_length = inputs_embeds.shape[:2]
898
+ else:
899
+ raise ValueError('You have to specify either input_ids or inputs_embeds')
900
+
901
+ seq_length_with_past = seq_length
902
+ past_key_values_length = 0
903
+ if past_key_values is not None:
904
+ past_key_values_length = past_key_values[0][0].shape[2]
905
+ seq_length_with_past = seq_length_with_past + past_key_values_length
906
+
907
+ if position_ids is None:
908
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
909
+ position_ids = torch.arange(
910
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
911
+ )
912
+ position_ids = position_ids.unsqueeze(0)
913
+
914
+ if inputs_embeds is None:
915
+ inputs_embeds = self.tok_embeddings(input_ids)
916
+
917
+ if self.config.attn_implementation == 'flash_attention_2':
918
+ # 2d mask is passed through the layers
919
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
920
+ else:
921
+ if attention_mask is None:
922
+ attention_mask = torch.ones(
923
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
924
+ )
925
+ attention_mask = self._prepare_decoder_attention_mask(
926
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
927
+ )
928
+
929
+ # embed positions
930
+ hidden_states = inputs_embeds
931
+
932
+ if self.gradient_checkpointing and self.training:
933
+ if use_cache:
934
+ logger.warning_once(
935
+ '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
936
+ )
937
+ use_cache = False
938
+
939
+ # decoder layers
940
+ all_hidden_states = () if output_hidden_states else None
941
+ all_self_attns = () if output_attentions else None
942
+ next_decoder_cache = () if use_cache else None
943
+
944
+ for idx, decoder_layer in enumerate(self.layers):
945
+ if output_hidden_states:
946
+ all_hidden_states += (hidden_states,)
947
+
948
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
949
+
950
+ if self.gradient_checkpointing and self.training:
951
+
952
+ def create_custom_forward(module):
953
+ def custom_forward(*inputs):
954
+ # None for past_key_value
955
+ return module(*inputs, output_attentions, None)
956
+
957
+ return custom_forward
958
+
959
+ layer_outputs = torch.utils.checkpoint.checkpoint(
960
+ create_custom_forward(decoder_layer),
961
+ hidden_states,
962
+ attention_mask,
963
+ position_ids,
964
+ None,
965
+ )
966
+ else:
967
+ layer_outputs = decoder_layer(
968
+ hidden_states,
969
+ attention_mask=attention_mask,
970
+ position_ids=position_ids,
971
+ past_key_value=past_key_value,
972
+ output_attentions=output_attentions,
973
+ use_cache=use_cache,
974
+ )
975
+
976
+ hidden_states = layer_outputs[0]
977
+
978
+ if use_cache:
979
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
980
+
981
+ if output_attentions:
982
+ all_self_attns += (layer_outputs[1],)
983
+
984
+ hidden_states = self.norm(hidden_states)
985
+
986
+ # add hidden states from the last decoder layer
987
+ if output_hidden_states:
988
+ all_hidden_states += (hidden_states,)
989
+
990
+ next_cache = next_decoder_cache if use_cache else None
991
+ if not return_dict:
992
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
993
+ return BaseModelOutputWithPast(
994
+ last_hidden_state=hidden_states,
995
+ past_key_values=next_cache,
996
+ hidden_states=all_hidden_states,
997
+ attentions=all_self_attns,
998
+ )
999
+
1000
+
1001
+ # Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
1002
+ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1003
+ _auto_class = 'AutoModelForCausalLM'
1004
+
1005
+ _tied_weights_keys = ['output.weight']
1006
+
1007
+ def __init__(self, config):
1008
+ super().__init__(config)
1009
+ self.model = InternLM2Model(config)
1010
+ self.vocab_size = config.vocab_size
1011
+ self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1012
+
1013
+ # Initialize weights and apply final processing
1014
+ self.post_init()
1015
+
1016
+ def get_input_embeddings(self):
1017
+ return self.model.tok_embeddings
1018
+
1019
+ def set_input_embeddings(self, value):
1020
+ self.model.tok_embeddings = value
1021
+
1022
+ def get_output_embeddings(self):
1023
+ return self.output
1024
+
1025
+ def set_output_embeddings(self, new_embeddings):
1026
+ self.output = new_embeddings
1027
+
1028
+ def set_decoder(self, decoder):
1029
+ self.model = decoder
1030
+
1031
+ def get_decoder(self):
1032
+ return self.model
1033
+
1034
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1035
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1036
+ def forward(
1037
+ self,
1038
+ input_ids: torch.LongTensor = None,
1039
+ attention_mask: Optional[torch.Tensor] = None,
1040
+ position_ids: Optional[torch.LongTensor] = None,
1041
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1042
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1043
+ labels: Optional[torch.LongTensor] = None,
1044
+ use_cache: Optional[bool] = None,
1045
+ output_attentions: Optional[bool] = None,
1046
+ output_hidden_states: Optional[bool] = None,
1047
+ return_dict: Optional[bool] = None,
1048
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1049
+ r"""
1050
+ Args:
1051
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1052
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1053
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1054
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1055
+
1056
+ Returns:
1057
+
1058
+ Example:
1059
+
1060
+ ```python
1061
+ >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1062
+
1063
+ >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1064
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1065
+
1066
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1067
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1068
+
1069
+ >>> # Generate
1070
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1071
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1072
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1073
+ ```"""
1074
+
1075
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1076
+ output_hidden_states = (
1077
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1078
+ )
1079
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1080
+
1081
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1082
+ outputs = self.model(
1083
+ input_ids=input_ids,
1084
+ attention_mask=attention_mask,
1085
+ position_ids=position_ids,
1086
+ past_key_values=past_key_values,
1087
+ inputs_embeds=inputs_embeds,
1088
+ use_cache=use_cache,
1089
+ output_attentions=output_attentions,
1090
+ output_hidden_states=output_hidden_states,
1091
+ return_dict=return_dict,
1092
+ )
1093
+
1094
+ hidden_states = outputs[0]
1095
+ logits = self.output(hidden_states)
1096
+ logits = logits.float()
1097
+
1098
+ loss = None
1099
+ if labels is not None:
1100
+ # Shift so that tokens < n predict n
1101
+ shift_logits = logits[..., :-1, :].contiguous()
1102
+ shift_labels = labels[..., 1:].contiguous()
1103
+ # Flatten the tokens
1104
+ loss_fct = CrossEntropyLoss()
1105
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1106
+ shift_labels = shift_labels.view(-1)
1107
+ # Enable model parallelism
1108
+ shift_labels = shift_labels.to(shift_logits.device)
1109
+ loss = loss_fct(shift_logits, shift_labels)
1110
+
1111
+ if not return_dict:
1112
+ output = (logits,) + outputs[1:]
1113
+ return (loss,) + output if loss is not None else output
1114
+
1115
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1116
+ output = CausalLMOutputWithPast(
1117
+ loss=loss,
1118
+ logits=logits,
1119
+ past_key_values=outputs.past_key_values,
1120
+ hidden_states=outputs.hidden_states,
1121
+ attentions=outputs.attentions,
1122
+ )
1123
+ output['logits'] = output['logits'].to(device)
1124
+ return output
1125
+
1126
+ def prepare_inputs_for_generation(
1127
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1128
+ ):
1129
+ if past_key_values is not None:
1130
+ past_length = past_key_values[0][0].shape[2]
1131
+
1132
+ # Some generation methods already pass only the last input ID
1133
+ if input_ids.shape[1] > past_length:
1134
+ remove_prefix_length = past_length
1135
+ else:
1136
+ # Default to old behavior: keep only final ID
1137
+ remove_prefix_length = input_ids.shape[1] - 1
1138
+
1139
+ input_ids = input_ids[:, remove_prefix_length:]
1140
+
1141
+ position_ids = kwargs.get('position_ids', None)
1142
+ if attention_mask is not None and position_ids is None:
1143
+ # create position_ids on the fly for batch generation
1144
+ position_ids = attention_mask.long().cumsum(-1) - 1
1145
+ position_ids.masked_fill_(attention_mask == 0, 1)
1146
+ if past_key_values:
1147
+ position_ids = position_ids[:, -input_ids.shape[1]:]
1148
+
1149
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1150
+ if inputs_embeds is not None and past_key_values is None:
1151
+ model_inputs = {'inputs_embeds': inputs_embeds}
1152
+ else:
1153
+ model_inputs = {'input_ids': input_ids}
1154
+
1155
+ model_inputs.update(
1156
+ {
1157
+ 'position_ids': position_ids,
1158
+ 'past_key_values': past_key_values,
1159
+ 'use_cache': kwargs.get('use_cache'),
1160
+ 'attention_mask': attention_mask,
1161
+ }
1162
+ )
1163
+ return model_inputs
1164
+
1165
+ @staticmethod
1166
+ def _reorder_cache(past_key_values, beam_idx):
1167
+ reordered_past = ()
1168
+ for layer_past in past_key_values:
1169
+ reordered_past += (
1170
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1171
+ )
1172
+ return reordered_past
1173
+
1174
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=''):
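+ # Note (added comment): builds a ChatML-style prompt — an optional system message followed by alternating user/assistant turns, each wrapped in <|im_start|>/<|im_end|> markers.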
1175
+ if tokenizer.add_bos_token:
1176
+ prompt = ''
1177
+ else:
1178
+ prompt = tokenizer.bos_token
1179
+ if meta_instruction:
1180
+ prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
1181
+ for record in history:
1182
+ prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
1183
+ prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
1184
+ return tokenizer([prompt], return_tensors='pt')
1185
+
1186
+ @torch.no_grad()
1187
+ def chat(
1188
+ self,
1189
+ tokenizer,
1190
+ query: str,
1191
+ history: List[Tuple[str, str]] = [],
1192
+ streamer: Optional[BaseStreamer] = None,
1193
+ max_new_tokens: int = 1024,
1194
+ do_sample: bool = True,
1195
+ temperature: float = 0.8,
1196
+ top_p: float = 0.8,
1197
+ meta_instruction: str = 'You are an AI assistant whose name is InternLM (书生·浦语).\n'
1198
+ '- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n'
1199
+ '- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.',
1200
+ **kwargs,
1201
+ ):
1202
+ inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1203
+ inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1204
+ # also add end-of-assistant token in eos token id to avoid unnecessary generation
1205
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(['<|im_end|>'])[0]]
1206
+ outputs = self.generate(
1207
+ **inputs,
1208
+ streamer=streamer,
1209
+ max_new_tokens=max_new_tokens,
1210
+ do_sample=do_sample,
1211
+ temperature=temperature,
1212
+ top_p=top_p,
1213
+ eos_token_id=eos_token_id,
1214
+ **kwargs,
1215
+ )
1216
+ outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]):]
1217
+ response = tokenizer.decode(outputs, skip_special_tokens=True)
1218
+ response = response.split('<|im_end|>')[0]
1219
+ history = history + [(query, response)]
1220
+ return response, history
1221
+
1222
+ @torch.no_grad()
1223
+ def stream_chat(
1224
+ self,
1225
+ tokenizer,
1226
+ query: str,
1227
+ history: List[Tuple[str, str]] = [],
1228
+ max_new_tokens: int = 1024,
1229
+ do_sample: bool = True,
1230
+ temperature: float = 0.8,
1231
+ top_p: float = 0.8,
1232
+ **kwargs,
1233
+ ):
1234
+ """
1235
+ Return a generator in format: (response, history)
1236
+ Eg.
1237
+ ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
1238
+ ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
1239
+ """
1240
+ if BaseStreamer is None:
1241
+ raise ModuleNotFoundError(
1242
+ 'The version of `transformers` is too low. Please make sure '
1243
+ 'that you have installed `transformers>=4.28.0`.'
1244
+ )
1245
+
1246
+ response_queue = queue.Queue(maxsize=20)
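+ # Note (added comment): generation runs in a background producer thread; the streamer pushes partial (response, history) tuples onto this queue, and consumer() yields them until a None sentinel marks the end of generation.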
1247
+
1248
+ class ChatStreamer(BaseStreamer):
1249
+ def __init__(self, tokenizer) -> None:
1250
+ super().__init__()
1251
+ self.tokenizer = tokenizer
1252
+ self.queue = response_queue
1253
+ self.query = query
1254
+ self.history = history
1255
+ self.response = ''
1256
+ self.cache = []
1257
+ self.received_inputs = False
1258
+ self.queue.put((self.response, history + [(self.query, self.response)]))
1259
+
1260
+ def put(self, value):
1261
+ if len(value.shape) > 1 and value.shape[0] > 1:
1262
+ raise ValueError('ChatStreamer only supports batch size 1')
1263
+ elif len(value.shape) > 1:
1264
+ value = value[0]
1265
+
1266
+ if not self.received_inputs:
1267
+ # The first received value is input_ids, ignore here
1268
+ self.received_inputs = True
1269
+ return
1270
+
1271
+ self.cache.extend(value.tolist())
1272
+ token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
1273
+ if token.strip() != '<|im_end|>':
1274
+ self.response = self.response + token
1275
+ history = self.history + [(self.query, self.response)]
1276
+ self.queue.put((self.response, history))
1277
+ self.cache = []
1278
+ else:
1279
+ self.end()
1280
+
1281
+ def end(self):
1282
+ self.queue.put(None)
1283
+
1284
+ def stream_producer():
1285
+ return self.chat(
1286
+ tokenizer=tokenizer,
1287
+ query=query,
1288
+ streamer=ChatStreamer(tokenizer=tokenizer),
1289
+ history=history,
1290
+ max_new_tokens=max_new_tokens,
1291
+ do_sample=do_sample,
1292
+ temperature=temperature,
1293
+ top_p=top_p,
1294
+ **kwargs,
1295
+ )
1296
+
1297
+ def consumer():
1298
+ producer = threading.Thread(target=stream_producer)
1299
+ producer.start()
1300
+ while True:
1301
+ res = response_queue.get()
1302
+ if res is None:
1303
+ return
1304
+ yield res
1305
+
1306
+ return consumer()
1307
+
1308
+
1309
+ # Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
1310
+ @add_start_docstrings(
1311
+ """
1312
+ The InternLM2 Model transformer with a sequence classification head on top (linear layer).
1313
+
1314
+ [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
1315
+ as other causal models (e.g. GPT-2) do.
1316
+
1317
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1318
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1319
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1320
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1321
+ each row of the batch).
1322
+ """,
1323
+ InternLM2_START_DOCSTRING,
1324
+ )
1325
+ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
1326
+ def __init__(self, config):
1327
+ super().__init__(config)
1328
+ self.num_labels = config.num_labels
1329
+ self.model = InternLM2Model(config)
1330
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1331
+
1332
+ # Initialize weights and apply final processing
1333
+ self.post_init()
1334
+
1335
+ def get_input_embeddings(self):
1336
+ return self.model.tok_embeddings
1337
+
1338
+ def set_input_embeddings(self, value):
1339
+ self.model.tok_embeddings = value
1340
+
1341
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1342
+ def forward(
1343
+ self,
1344
+ input_ids: torch.LongTensor = None,
1345
+ attention_mask: Optional[torch.Tensor] = None,
1346
+ position_ids: Optional[torch.LongTensor] = None,
1347
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1348
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1349
+ labels: Optional[torch.LongTensor] = None,
1350
+ use_cache: Optional[bool] = None,
1351
+ output_attentions: Optional[bool] = None,
1352
+ output_hidden_states: Optional[bool] = None,
1353
+ return_dict: Optional[bool] = None,
1354
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1355
+ r"""
1356
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1357
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1358
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1359
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1360
+ """
1361
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1362
+
1363
+ transformer_outputs = self.model(
1364
+ input_ids,
1365
+ attention_mask=attention_mask,
1366
+ position_ids=position_ids,
1367
+ past_key_values=past_key_values,
1368
+ inputs_embeds=inputs_embeds,
1369
+ use_cache=use_cache,
1370
+ output_attentions=output_attentions,
1371
+ output_hidden_states=output_hidden_states,
1372
+ return_dict=return_dict,
1373
+ )
1374
+ hidden_states = transformer_outputs[0]
1375
+ logits = self.score(hidden_states)
1376
+
1377
+ if input_ids is not None:
1378
+ batch_size = input_ids.shape[0]
1379
+ else:
1380
+ batch_size = inputs_embeds.shape[0]
1381
+
1382
+ if self.config.pad_token_id is None and batch_size != 1:
1383
+ raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
1384
+ if self.config.pad_token_id is None:
1385
+ sequence_lengths = -1
1386
+ else:
1387
+ if input_ids is not None:
1388
+ sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
1389
+ logits.device
1390
+ )
1391
+ else:
1392
+ sequence_lengths = -1
1393
+
1394
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1395
+
1396
+ loss = None
1397
+ if labels is not None:
1398
+ labels = labels.to(logits.device)
1399
+ if self.config.problem_type is None:
1400
+ if self.num_labels == 1:
1401
+ self.config.problem_type = 'regression'
1402
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1403
+ self.config.problem_type = 'single_label_classification'
1404
+ else:
1405
+ self.config.problem_type = 'multi_label_classification'
1406
+
1407
+ if self.config.problem_type == 'regression':
1408
+ loss_fct = MSELoss()
1409
+ if self.num_labels == 1:
1410
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1411
+ else:
1412
+ loss = loss_fct(pooled_logits, labels)
1413
+ elif self.config.problem_type == 'single_label_classification':
1414
+ loss_fct = CrossEntropyLoss()
1415
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1416
+ elif self.config.problem_type == 'multi_label_classification':
1417
+ loss_fct = BCEWithLogitsLoss()
1418
+ loss = loss_fct(pooled_logits, labels)
1419
+ if not return_dict:
1420
+ output = (pooled_logits,) + transformer_outputs[1:]
1421
+ return ((loss,) + output) if loss is not None else output
1422
+
1423
+ return SequenceClassifierOutputWithPast(
1424
+ loss=loss,
1425
+ logits=pooled_logits,
1426
+ past_key_values=transformer_outputs.past_key_values,
1427
+ hidden_states=transformer_outputs.hidden_states,
1428
+ attentions=transformer_outputs.attentions,
1429
+ )
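As the docstring above explains, the classification head pools the logits of the last non-padding token in each row whenever a pad_token_id is configured. A self-contained sketch of that index computation (pad_token_id=0 and the toy tensors below are assumptions for illustration):

import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 0, 0, 0]])
logits = torch.randn(2, 5, 3)  # (batch, seq_len, num_labels)

# First position equal to pad_token_id, minus one -> index of the last real token.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
pooled_logits = logits[torch.arange(input_ids.shape[0]), sequence_lengths]
print(sequence_lengths.tolist())  # [2, 1]
print(pooled_logits.shape)        # torch.Size([2, 3])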
modeling_phi3.py ADDED
@@ -0,0 +1,1610 @@
1
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """ PyTorch Phi-3 model."""
16
+
17
+ import inspect
18
+ import math
19
+ import warnings
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+ from transformers.activations import ACT2FN
28
+ from transformers.cache_utils import Cache, DynamicCache
29
+ from transformers.modeling_attn_mask_utils import \
30
+ _prepare_4d_causal_attention_mask
31
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
32
+ CausalLMOutputWithPast,
33
+ SequenceClassifierOutputWithPast,
34
+ TokenClassifierOutput)
35
+ from transformers.modeling_utils import PreTrainedModel
36
+ from transformers.utils import (add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ is_flash_attn_2_available,
40
+ is_flash_attn_greater_or_equal_2_10, logging,
41
+ replace_return_docstrings)
42
+
43
+ from .configuration_phi3 import Phi3Config
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ # Transformers scans dependencies in the modeling file, which causes issues with conditional loading. The regex only ignores try/except blocks, not if statements
48
+ # if is_flash_attn_2_available():
49
+ _flash_supports_window_size = False
50
+ try:
51
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
52
+ from flash_attn.bert_padding import (index_first_axis, pad_input, # noqa
53
+ unpad_input)
54
+
55
+ _flash_supports_window_size = 'window_size' in list(inspect.signature(flash_attn_func).parameters)
56
+ has_flash_attn = True
57
+ except ImportError as error:
58
+ logger.warning(
59
+ f'`flash-attention` package not found, consider installing for better performance: {error}.'
60
+ )
61
+ if not _flash_supports_window_size:
62
+ logger.warning(
63
+ "Current `flash-attenton` does not support `window_size`. Either upgrade or use `attn_implementation='eager'`."
64
+ )
65
+ has_flash_attn = False
66
+
67
+ _CHECKPOINT_FOR_DOC = 'microsoft/Phi-3-mini-4k-instruct'
68
+ _CONFIG_FOR_DOC = 'Phi3Config'
69
+
70
+ PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = [
71
+ 'microsoft/Phi-3-mini-4k-instruct',
72
+ 'microsoft/Phi-3-mini-128k-instruct',
73
+ # See all Phi-3 models at https://huggingface.co/models?filter=Phi-3
74
+ ]
75
+
76
+
77
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3
78
+ class Phi3RMSNorm(nn.Module):
79
+ def __init__(self, hidden_size, eps=1e-6):
80
+ """
81
+ Phi3RMSNorm is equivalent to T5LayerNorm
82
+ """
83
+ super().__init__()
84
+ self.weight = nn.Parameter(torch.ones(hidden_size))
85
+ self.variance_epsilon = eps
86
+
87
+ def forward(self, hidden_states):
88
+ input_dtype = hidden_states.dtype
89
+ hidden_states = hidden_states.to(torch.float32)
90
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
91
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
92
+ return self.weight * hidden_states.to(input_dtype)
93
+
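Phi3RMSNorm rescales every hidden vector by the reciprocal of its root-mean-square (computed in float32) and then multiplies by a learned per-channel weight. A tiny numerical sketch of the same computation with arbitrary example values:

import torch

eps = 1e-6
hidden = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
weight = torch.ones(4)

variance = hidden.pow(2).mean(-1, keepdim=True)   # mean of squares, i.e. RMS**2
normed = hidden * torch.rsqrt(variance + eps)     # divide by the root-mean-square
out = weight * normed
print(normed.pow(2).mean(-1))  # ~1.0: each row now has unit root-mean-square
print(out)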
94
+
95
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
96
+ def _get_unpad_data(attention_mask):
97
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
98
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
99
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
100
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
101
+ return (
102
+ indices,
103
+ cu_seqlens,
104
+ max_seqlen_in_batch,
105
+ )
106
+
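_get_unpad_data converts a 0/1 padding mask into the flat token indices, cumulative sequence lengths, and maximum length that the varlen flash-attention kernels expect. A worked example, assuming a batch of two sequences of true lengths 3 and 2 padded to length 4:

import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])

seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)             # tensor([3, 2])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()  # tensor([0, 1, 2, 4, 5])
max_seqlen_in_batch = seqlens_in_batch.max().item()                          # 3
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
print(indices.tolist(), cu_seqlens.tolist(), max_seqlen_in_batch)            # [0, 1, 2, 4, 5] [0, 3, 5] 3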
107
+
108
+ # Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with gemma->phi3, Gemma->Phi3
109
+ class Phi3RotaryEmbedding(nn.Module):
110
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
111
+ super().__init__()
112
+
113
+ self.dim = dim
114
+ self.max_position_embeddings = max_position_embeddings
115
+ self.base = base
116
+ self.register_buffer('inv_freq', None, persistent=False)
117
+
118
+ @torch.no_grad()
119
+ def forward(self, x, position_ids, seq_len=None):
120
+ # x: [bs, num_attention_heads, seq_len, head_size]
121
+ if self.inv_freq is None:
122
+ self.inv_freq = 1.0 / (
123
+ self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
124
+ )
125
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
126
+ position_ids_expanded = position_ids[:, None, :].float()
127
+ # Force float32 since bfloat16 loses precision on long contexts
128
+ # See https://github.com/huggingface/transformers/pull/29285
129
+ device_type = x.device.type
130
+ device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
131
+ with torch.autocast(device_type=device_type, enabled=False):
132
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
133
+ emb = torch.cat((freqs, freqs), dim=-1)
134
+ cos = emb.cos()
135
+ sin = emb.sin()
136
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
137
+
138
+
139
+ class Phi3SuScaledRotaryEmbedding(Phi3RotaryEmbedding):
140
+ def __init__(self, dim, config, device=None):
141
+ super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)
142
+
143
+ self.short_factor = config.rope_scaling['short_factor']
144
+ self.long_factor = config.rope_scaling['long_factor']
145
+ self.original_max_position_embeddings = config.original_max_position_embeddings
146
+
147
+ @torch.no_grad()
148
+ def forward(self, x, position_ids, seq_len=None):
149
+ seq_len = torch.max(position_ids) + 1
150
+ if seq_len > self.original_max_position_embeddings:
151
+ ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
152
+ else:
153
+ ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)
154
+
155
+ inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
156
+ self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)
157
+
158
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
159
+ position_ids_expanded = position_ids[:, None, :].float()
160
+
161
+ # Force float32 since bfloat16 loses precision on long contexts
162
+ # See https://github.com/huggingface/transformers/pull/29285
163
+ device_type = x.device.type
164
+ device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
165
+ with torch.autocast(device_type=device_type, enabled=False):
166
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
167
+ emb = torch.cat((freqs, freqs), dim=-1)
168
+
169
+ scale = self.max_position_embeddings / self.original_max_position_embeddings
170
+ if scale <= 1.0:
171
+ scaling_factor = 1.0
172
+ else:
173
+ scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))
174
+
175
+ cos = emb.cos() * scaling_factor
176
+ sin = emb.sin() * scaling_factor
177
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
178
+
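Once the context grows past original_max_position_embeddings, the 'su'-scaled embedding above also multiplies the cos/sin tables by sqrt(1 + log(scale) / log(original_max_position_embeddings)). A quick numerical check of that factor, assuming the 128k-context configuration (131072 extended positions, 4096 original):

import math

max_position_embeddings = 131072
original_max_position_embeddings = 4096

scale = max_position_embeddings / original_max_position_embeddings  # 32.0
scaling_factor = math.sqrt(1 + math.log(scale) / math.log(original_max_position_embeddings))
print(scale, round(scaling_factor, 4))  # 32.0 1.1902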
179
+
180
+ class Phi3YarnScaledRotaryEmbedding(Phi3RotaryEmbedding):
181
+ def __init__(self, dim, config, device=None):
182
+ super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)
183
+
184
+ self.short_factor = config.rope_scaling['short_factor']
185
+ self.long_factor = config.rope_scaling['long_factor']
186
+ self.original_max_position_embeddings = config.original_max_position_embeddings
187
+
188
+ @torch.no_grad()
189
+ def forward(self, x, position_ids, seq_len=None):
190
+ seq_len = torch.max(position_ids) + 1
191
+ if seq_len > self.original_max_position_embeddings:
192
+ ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
193
+ else:
194
+ ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)
195
+
196
+ inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
197
+ self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)
198
+
199
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
200
+ position_ids_expanded = position_ids[:, None, :].float()
201
+
202
+ # Force float32 since bfloat16 loses precision on long contexts
203
+ # See https://github.com/huggingface/transformers/pull/29285
204
+ device_type = x.device.type
205
+ device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
206
+ with torch.autocast(device_type=device_type, enabled=False):
207
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
208
+ emb = torch.cat((freqs, freqs), dim=-1)
209
+
210
+ scale = self.max_position_embeddings / self.original_max_position_embeddings
211
+ if scale <= 1.0:
212
+ scaling_factor = 1.0
213
+ else:
214
+ scaling_factor = 0.1 * math.log(scale) + 1.0
215
+
216
+ cos = emb.cos() * scaling_factor
217
+ sin = emb.sin() * scaling_factor
218
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
219
+
220
+
221
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
222
+ def rotate_half(x):
223
+ """Rotates half the hidden dims of the input."""
224
+ x1 = x[..., : x.shape[-1] // 2]
225
+ x2 = x[..., x.shape[-1] // 2 :]
226
+ return torch.cat((-x2, x1), dim=-1)
227
+
228
+
229
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
230
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
231
+ """Applies Rotary Position Embedding to the query and key tensors.
232
+
233
+ Args:
234
+ q (`torch.Tensor`): The query tensor.
235
+ k (`torch.Tensor`): The key tensor.
236
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
237
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
238
+ position_ids (`torch.Tensor`, *optional*):
239
+ Deprecated and unused.
240
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
241
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
242
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
243
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
244
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
245
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
246
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
247
+ Returns:
248
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
249
+ """
250
+ cos = cos.unsqueeze(unsqueeze_dim)
251
+ sin = sin.unsqueeze(unsqueeze_dim)
252
+ q_embed = (q * cos) + (rotate_half(q) * sin)
253
+ k_embed = (k * cos) + (rotate_half(k) * sin)
254
+ return q_embed, k_embed
255
+
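rotate_half and apply_rotary_pos_emb implement the standard RoPE rotation: each half-pair of dimensions in q and k is rotated by a position-dependent angle taken from the cos/sin tables, and unsqueeze_dim=1 broadcasts those tables across the head dimension. A shape-level sketch (rotate_half is re-implemented inline so the snippet is self-contained; sizes are arbitrary):

import torch

def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

bsz, heads, seq_len, head_dim = 1, 2, 4, 8
q = torch.randn(bsz, heads, seq_len, head_dim)
k = torch.randn(bsz, heads, seq_len, head_dim)
cos = torch.randn(bsz, seq_len, head_dim)  # shape as produced by the rotary embedding forward
sin = torch.randn(bsz, seq_len, head_dim)

# unsqueeze(1) -> (bsz, 1, seq_len, head_dim), broadcast against (bsz, heads, seq_len, head_dim)
q_embed = (q * cos.unsqueeze(1)) + (rotate_half(q) * sin.unsqueeze(1))
k_embed = (k * cos.unsqueeze(1)) + (rotate_half(k) * sin.unsqueeze(1))
print(q_embed.shape, k_embed.shape)  # torch.Size([1, 2, 4, 8]) twice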
256
+
257
+ class Phi3MLP(nn.Module):
258
+ def __init__(self, config):
259
+ super().__init__()
260
+
261
+ self.config = config
262
+ self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
263
+ self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
264
+
265
+ self.activation_fn = ACT2FN[config.hidden_act]
266
+
267
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
268
+ up_states = self.gate_up_proj(hidden_states)
269
+
270
+ gate, up_states = up_states.chunk(2, dim=-1)
271
+ up_states = up_states * self.activation_fn(gate)
272
+
273
+ return self.down_proj(up_states)
274
+
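Phi3MLP fuses the gate and up projections into a single linear layer, splits the output with chunk(2, dim=-1), gates the up half with the configured activation (SiLU in this configuration), and projects back down. A toy-sized sketch of the same flow (hidden_size=8 and intermediate_size=16 are arbitrary example values):

import torch
from torch import nn
import torch.nn.functional as F

hidden_size, intermediate_size = 8, 16
gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

x = torch.randn(1, 4, hidden_size)           # (batch, seq_len, hidden)
gate, up = gate_up_proj(x).chunk(2, dim=-1)  # two (1, 4, 16) halves
out = down_proj(up * F.silu(gate))           # gated activation, then down-projection
print(out.shape)                             # torch.Size([1, 4, 8])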
275
+
276
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
277
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
278
+ """
279
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
280
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
281
+ """
282
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
283
+ if n_rep == 1:
284
+ return hidden_states
285
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
286
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
287
+
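repeat_kv broadcasts each key/value head n_rep times so that grouped-query attention can reuse the ordinary multi-head matmuls. A quick shape check with 2 kv heads expanded to 8 attention heads (n_rep=4):

import torch

batch, num_key_value_heads, seqlen, head_dim, n_rep = 1, 2, 5, 16, 4
kv = torch.randn(batch, num_key_value_heads, seqlen, head_dim)

expanded = kv[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, seqlen, head_dim)
repeated = expanded.reshape(batch, num_key_value_heads * n_rep, seqlen, head_dim)
print(repeated.shape)  # torch.Size([1, 8, 5, 16])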
288
+
289
+ class Phi3Attention(nn.Module):
290
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
291
+
292
+ def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
293
+ super().__init__()
294
+ self.config = config
295
+ self.layer_idx = layer_idx
296
+ if layer_idx is None:
297
+ logger.warning_once(
298
+ f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will '
299
+ 'lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` '
300
+ 'when creating this class.'
301
+ )
302
+
303
+ self.attention_dropout = config.attention_dropout
304
+ self.hidden_size = config.hidden_size
305
+ self.num_heads = config.num_attention_heads
306
+ self.head_dim = self.hidden_size // self.num_heads
307
+ self.num_key_value_heads = config.num_key_value_heads
308
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
309
+ self.max_position_embeddings = config.max_position_embeddings
310
+ self.original_max_position_embeddings = config.original_max_position_embeddings
311
+ self.rope_theta = config.rope_theta
312
+ self.rope_scaling = config.rope_scaling
313
+ self.is_causal = True
314
+
315
+ if (self.head_dim * self.num_heads) != self.hidden_size:
316
+ raise ValueError(
317
+ f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
318
+ f' and `num_heads`: {self.num_heads}).'
319
+ )
320
+
321
+ op_size = self.num_heads * self.head_dim + 2 * (self.num_key_value_heads * self.head_dim)
322
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
323
+ self.qkv_proj = nn.Linear(self.hidden_size, op_size, bias=False)
324
+ self._init_rope()
325
+
326
+ def _init_rope(self):
327
+ if self.rope_scaling is None:
328
+ self.rotary_emb = Phi3RotaryEmbedding(
329
+ self.head_dim,
330
+ max_position_embeddings=self.max_position_embeddings,
331
+ base=self.rope_theta,
332
+ )
333
+ else:
334
+ scaling_type = self.config.rope_scaling['type']
335
+ if scaling_type == 'su':
336
+ self.rotary_emb = Phi3SuScaledRotaryEmbedding(self.head_dim, self.config)
337
+ elif scaling_type == 'yarn':
338
+ self.rotary_emb = Phi3YarnScaledRotaryEmbedding(self.head_dim, self.config)
339
+ else:
340
+ raise ValueError(f'Unknown RoPE scaling type {scaling_type}')
341
+
342
+ def forward(
343
+ self,
344
+ hidden_states: torch.Tensor,
345
+ attention_mask: Optional[torch.Tensor] = None,
346
+ position_ids: Optional[torch.LongTensor] = None,
347
+ past_key_value: Optional[Cache] = None,
348
+ output_attentions: bool = False,
349
+ use_cache: bool = False,
350
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
351
+ logger.warning_once('You are not running the flash-attention implementation, expect numerical differences.')
352
+
353
+ bsz, q_len, _ = hidden_states.size()
354
+
355
+ qkv = self.qkv_proj(hidden_states)
356
+ query_pos = self.num_heads * self.head_dim
357
+ query_states = qkv[..., :query_pos]
358
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
359
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
360
+
361
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
362
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
363
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
364
+
365
+ kv_seq_len = key_states.shape[-2]
366
+ if past_key_value is not None:
367
+ if self.layer_idx is None:
368
+ raise ValueError(
369
+ f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
370
+ 'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
371
+ 'with a layer index.'
372
+ )
373
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
374
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)
375
+
376
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
377
+
378
+ if past_key_value is not None:
379
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
380
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
381
+
382
+ # repeat k/v heads if n_kv_heads < n_heads
383
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
384
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
385
+
386
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
387
+
388
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
389
+ raise ValueError(
390
+ f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
391
+ f' {attn_weights.size()}'
392
+ )
393
+
394
+ if attention_mask is not None:
395
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
396
+ raise ValueError(
397
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
398
+ )
399
+ attn_weights = attn_weights + attention_mask
400
+
401
+ # upcast attention to fp32
402
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
403
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
404
+
405
+ attn_output = torch.matmul(attn_weights, value_states)
406
+
407
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
408
+ raise ValueError(
409
+ f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
410
+ f' {attn_output.size()}'
411
+ )
412
+
413
+ attn_output = attn_output.transpose(1, 2).contiguous()
414
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
415
+
416
+ attn_output = self.o_proj(attn_output)
417
+
418
+ if not output_attentions:
419
+ attn_weights = None
420
+
421
+ return attn_output, attn_weights, past_key_value
422
+
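The eager forward above is textbook scaled dot-product attention: scores = QK^T / sqrt(head_dim), plus an additive mask, a float32 softmax, dropout, then a matmul with V. A self-contained sketch of that core with toy sizes and no KV cache:

import math
import torch
from torch import nn

bsz, num_heads, q_len, head_dim = 1, 2, 4, 8
query = torch.randn(bsz, num_heads, q_len, head_dim)
key = torch.randn(bsz, num_heads, q_len, head_dim)
value = torch.randn(bsz, num_heads, q_len, head_dim)

# Additive causal mask: 0 where attention is allowed, a large negative value elsewhere.
causal_mask = torch.triu(torch.full((q_len, q_len), torch.finfo(torch.float32).min), diagonal=1)

attn_weights = query @ key.transpose(2, 3) / math.sqrt(head_dim)
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value.dtype)
attn_output = attn_weights @ value
print(attn_output.shape)  # torch.Size([1, 2, 4, 8])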
423
+
424
+ class Phi3FlashAttention2(Phi3Attention):
425
+ """
426
+ Phi-3 flash attention module. This module inherits from `Phi3Attention`, as the weights of the module stay
427
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
428
+ flash attention and deal with padding tokens in case the input contains any of them.
429
+ """
430
+
431
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
432
+ def __init__(self, *args, **kwargs):
433
+ super().__init__(*args, **kwargs)
434
+
435
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
436
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
437
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
438
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
439
+
440
+ def forward(
441
+ self,
442
+ hidden_states: torch.Tensor,
443
+ attention_mask: Optional[torch.LongTensor] = None,
444
+ position_ids: Optional[torch.LongTensor] = None,
445
+ past_key_value: Optional[Cache] = None,
446
+ output_attentions: bool = False,
447
+ use_cache: bool = False,
448
+ **kwargs,
449
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
450
+ # Phi3FlashAttention2 attention does not support output_attentions
451
+
452
+ if not _flash_supports_window_size:
453
+ logger.warning_once(
454
+ "The current flash attention version does not support sliding window attention. Please use `attn_implementation='eager'` or upgrade flash-attn library."
455
+ )
456
+ raise ValueError('The current flash attention version does not support sliding window attention.')
457
+
458
+ output_attentions = False
459
+
460
+ if 'padding_mask' in kwargs:
461
+ warnings.warn(
462
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
463
+ )
464
+
465
+ # overwrite attention_mask with padding_mask
466
+ attention_mask = kwargs.pop('padding_mask')
467
+
468
+ bsz, q_len, _ = hidden_states.size()
469
+
470
+ qkv = self.qkv_proj(hidden_states)
471
+ query_pos = self.num_heads * self.head_dim
472
+ query_states = qkv[..., :query_pos]
473
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
474
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
475
+
476
+ # Flash attention requires the input to have the shape
477
+ # batch_size x seq_length x num_heads x head_dim
478
+ # therefore we just need to keep the original shape
479
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
480
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
481
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
482
+
483
+ kv_seq_len = key_states.shape[-2]
484
+ if past_key_value is not None:
485
+ if self.layer_idx is None:
486
+ raise ValueError(
487
+ f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
488
+ 'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
489
+ 'with a layer index.'
490
+ )
491
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
492
+
493
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
494
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
495
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=rotary_seq_len)
496
+
497
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
498
+
499
+ use_sliding_windows = (
500
+ _flash_supports_window_size
501
+ and getattr(self.config, 'sliding_window', None) is not None
502
+ and kv_seq_len > self.config.sliding_window
503
+ )
504
+
505
+ if past_key_value is not None:
506
+ # Activate cache slicing only if the config has a `sliding_window` attribute set
507
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
508
+ if (
509
+ getattr(self.config, 'sliding_window', None) is not None
510
+ and kv_seq_len > self.config.sliding_window
511
+ and cache_has_contents
512
+ ):
513
+ slicing_tokens = 1 - self.config.sliding_window
514
+
515
+ past_key = past_key_value[self.layer_idx][0]
516
+ past_value = past_key_value[self.layer_idx][1]
517
+
518
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
519
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
520
+
521
+ if past_key.shape[-2] != self.config.sliding_window - 1:
522
+ raise ValueError(
523
+ f'past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got'
524
+ f' {past_key.shape}'
525
+ )
526
+
527
+ if attention_mask is not None:
528
+ attention_mask = attention_mask[:, slicing_tokens:]
529
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
530
+
531
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
532
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
533
+
534
+ # repeat k/v heads if n_kv_heads < n_heads
535
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
536
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
537
+
538
+ attn_dropout = self.attention_dropout if self.training else 0.0
539
+
540
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
541
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
542
+ # cast them back to the correct dtype just to be sure everything works as expected.
543
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
544
+ # in fp32.
545
+
546
+ if query_states.dtype == torch.float32:
547
+ if torch.is_autocast_enabled():
548
+ target_dtype = torch.get_autocast_gpu_dtype()
549
+ # Handle the case where the model is quantized
550
+ elif hasattr(self.config, '_pre_quantization_dtype'):
551
+ target_dtype = self.config._pre_quantization_dtype
552
+ else:
553
+ target_dtype = self.qkv_proj.weight.dtype
554
+
555
+ logger.warning_once(
556
+ f'The input hidden states seem to have been silently cast to float32; this might be related to'
557
+ f' the fact you have upcast embedding or layer norm layers to float32. We will cast the input back to'
558
+ f' {target_dtype}.'
559
+ )
560
+
561
+ query_states = query_states.to(target_dtype)
562
+ key_states = key_states.to(target_dtype)
563
+ value_states = value_states.to(target_dtype)
564
+
565
+ # Reshape to the expected shape for Flash Attention
566
+ query_states = query_states.transpose(1, 2)
567
+ key_states = key_states.transpose(1, 2)
568
+ value_states = value_states.transpose(1, 2)
569
+
570
+ attn_output = self._flash_attention_forward(
571
+ query_states,
572
+ key_states,
573
+ value_states,
574
+ attention_mask,
575
+ q_len,
576
+ dropout=attn_dropout,
577
+ use_sliding_windows=use_sliding_windows,
578
+ )
579
+
580
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
581
+ attn_output = self.o_proj(attn_output)
582
+
583
+ if not output_attentions:
584
+ attn_weights = None
585
+
586
+ return attn_output, attn_weights, past_key_value
587
+
588
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward
589
+ def _flash_attention_forward(
590
+ self,
591
+ query_states,
592
+ key_states,
593
+ value_states,
594
+ attention_mask,
595
+ query_length,
596
+ dropout=0.0,
597
+ softmax_scale=None,
598
+ use_sliding_windows=False,
599
+ ):
600
+ """
601
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
602
+ it first unpads the input, then computes the attention scores and pads the final attention output.
603
+
604
+ Args:
605
+ query_states (`torch.Tensor`):
606
+ Input query states to be passed to Flash Attention API
607
+ key_states (`torch.Tensor`):
608
+ Input key states to be passed to Flash Attention API
609
+ value_states (`torch.Tensor`):
610
+ Input value states to be passed to Flash Attention API
611
+ attention_mask (`torch.Tensor`):
612
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
613
+ position of padding tokens and 1 for the position of non-padding tokens.
614
+ dropout (`float`):
615
+ Attention dropout
616
+ softmax_scale (`float`, *optional*):
617
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
618
+ use_sliding_windows (`bool`, *optional*):
619
+ Whether to activate sliding window attention.
620
+ """
621
+ if not self._flash_attn_uses_top_left_mask:
622
+ causal = self.is_causal
623
+ else:
624
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
625
+ causal = self.is_causal and query_length != 1
626
+
627
+ # Contains at least one padding token in the sequence
628
+ if attention_mask is not None:
629
+ batch_size = query_states.shape[0]
630
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
631
+ query_states, key_states, value_states, attention_mask, query_length
632
+ )
633
+
634
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
635
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
636
+
637
+ if not use_sliding_windows:
638
+ attn_output_unpad = flash_attn_varlen_func(
639
+ query_states,
640
+ key_states,
641
+ value_states,
642
+ cu_seqlens_q=cu_seqlens_q,
643
+ cu_seqlens_k=cu_seqlens_k,
644
+ max_seqlen_q=max_seqlen_in_batch_q,
645
+ max_seqlen_k=max_seqlen_in_batch_k,
646
+ dropout_p=dropout,
647
+ softmax_scale=softmax_scale,
648
+ causal=causal,
649
+ )
650
+ else:
651
+ attn_output_unpad = flash_attn_varlen_func(
652
+ query_states,
653
+ key_states,
654
+ value_states,
655
+ cu_seqlens_q=cu_seqlens_q,
656
+ cu_seqlens_k=cu_seqlens_k,
657
+ max_seqlen_q=max_seqlen_in_batch_q,
658
+ max_seqlen_k=max_seqlen_in_batch_k,
659
+ dropout_p=dropout,
660
+ softmax_scale=softmax_scale,
661
+ causal=causal,
662
+ window_size=(self.config.sliding_window, self.config.sliding_window),
663
+ )
664
+
665
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
666
+ else:
667
+ if not use_sliding_windows:
668
+ attn_output = flash_attn_func(
669
+ query_states,
670
+ key_states,
671
+ value_states,
672
+ dropout,
673
+ softmax_scale=softmax_scale,
674
+ causal=causal,
675
+ )
676
+ else:
677
+ attn_output = flash_attn_func(
678
+ query_states,
679
+ key_states,
680
+ value_states,
681
+ dropout,
682
+ softmax_scale=softmax_scale,
683
+ causal=causal,
684
+ window_size=(self.config.sliding_window, self.config.sliding_window),
685
+ )
686
+
687
+ return attn_output
688
+
689
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
690
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
691
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
692
+
693
+ # On the first iteration we need to properly re-create the padding mask
694
+ # by slicing it on the proper place
695
+ if kv_seq_len != attention_mask.shape[-1]:
696
+ attention_mask_num_tokens = attention_mask.shape[-1]
697
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
698
+
699
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
700
+
701
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
702
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
703
+
704
+ if query_length == kv_seq_len:
705
+ query_layer = index_first_axis(
706
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
707
+ )
708
+ cu_seqlens_q = cu_seqlens_k
709
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
710
+ indices_q = indices_k
711
+ elif query_length == 1:
712
+ max_seqlen_in_batch_q = 1
713
+ cu_seqlens_q = torch.arange(
714
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
715
+ ) # There is a memcpy here, that is very bad.
716
+ indices_q = cu_seqlens_q[:-1]
717
+ query_layer = query_layer.squeeze(1)
718
+ else:
719
+ # The -q_len: slice assumes left padding.
720
+ attention_mask = attention_mask[:, -query_length:]
721
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
722
+
723
+ return (
724
+ query_layer,
725
+ key_layer,
726
+ value_layer,
727
+ indices_q,
728
+ (cu_seqlens_q, cu_seqlens_k),
729
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
730
+ )
731
+
732
+
733
+ # copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Phi3
734
+ # TODO @Arthur no longer copied from LLama after static cache
735
+ class Phi3SdpaAttention(Phi3Attention):
736
+ """
737
+ Phi3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
738
+ `Phi3Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
739
+ SDPA API.
740
+ """
741
+
742
+ # Adapted from Phi3Attention.forward
743
+ def forward(
744
+ self,
745
+ hidden_states: torch.Tensor,
746
+ attention_mask: Optional[torch.Tensor] = None,
747
+ position_ids: Optional[torch.LongTensor] = None,
748
+ past_key_value: Optional[Cache] = None,
749
+ output_attentions: bool = False,
750
+ use_cache: bool = False,
751
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
752
+ if output_attentions:
753
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
754
+ logger.warning_once(
755
+ 'Phi3Model is using Phi3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, '
756
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
757
+ )
758
+ return super().forward(
759
+ hidden_states=hidden_states,
760
+ attention_mask=attention_mask,
761
+ position_ids=position_ids,
762
+ past_key_value=past_key_value,
763
+ output_attentions=output_attentions,
764
+ use_cache=use_cache,
765
+ )
766
+
767
+ bsz, q_len, _ = hidden_states.size()
768
+
769
+ qkv = self.qkv_proj(hidden_states)
770
+ query_pos = self.num_heads * self.head_dim
771
+ query_states = qkv[..., :query_pos]
772
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
773
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
774
+
775
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
776
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
777
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
778
+
779
+ kv_seq_len = key_states.shape[-2]
780
+ if past_key_value is not None:
781
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
782
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)
783
+
784
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
785
+
786
+ if past_key_value is not None:
787
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
788
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
789
+
790
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
791
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
792
+
793
+ if attention_mask is not None:
794
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
795
+ raise ValueError(
796
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
797
+ )
798
+
799
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
800
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
801
+ if query_states.device.type == 'cuda' and attention_mask is not None:
802
+ query_states = query_states.contiguous()
803
+ key_states = key_states.contiguous()
804
+ value_states = value_states.contiguous()
805
+
806
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
807
+ query_states,
808
+ key_states,
809
+ value_states,
810
+ attn_mask=attention_mask,
811
+ dropout_p=self.attention_dropout if self.training else 0.0,
812
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
813
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
814
+ )
815
+
816
+ attn_output = attn_output.transpose(1, 2).contiguous()
817
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
818
+
819
+ attn_output = self.o_proj(attn_output)
820
+
821
+ return attn_output, None, past_key_value
822
+
823
+
824
+ PHI3_ATTENTION_CLASSES = {
825
+ 'eager': Phi3Attention,
826
+ 'flash_attention_2': Phi3FlashAttention2,
827
+ 'sdpa': Phi3SdpaAttention,
828
+ }
829
+
830
+
831
+ class Phi3DecoderLayer(nn.Module):
832
+ def __init__(self, config: Phi3Config, layer_idx: int):
833
+ super().__init__()
834
+
835
+ self.config = config
836
+ self.self_attn = PHI3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
837
+
838
+ self.mlp = Phi3MLP(config)
839
+ self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
840
+
841
+ self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
842
+ self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
843
+ self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
844
+
845
+ def forward(
846
+ self,
847
+ hidden_states: torch.Tensor,
848
+ attention_mask: Optional[torch.Tensor] = None,
849
+ position_ids: Optional[torch.LongTensor] = None,
850
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
851
+ output_attentions: Optional[bool] = False,
852
+ use_cache: Optional[bool] = False,
853
+ **kwargs,
854
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
855
+ if 'padding_mask' in kwargs:
856
+ warnings.warn(
857
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
858
+ )
859
+ """
860
+ Args:
861
+ hidden_states (`torch.FloatTensor`):
862
+ input to the layer of shape `(batch, seq_len, embed_dim)`
863
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
864
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
865
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
866
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
867
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
868
+ output_attentions (`bool`, *optional*):
869
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
870
+ returned tensors for more detail.
871
+ use_cache (`bool`, *optional*):
872
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
873
+ (see `past_key_values`).
874
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
875
+ """
876
+
877
+ residual = hidden_states
878
+
879
+ hidden_states = self.input_layernorm(hidden_states)
880
+
881
+ # Self Attention
882
+ attn_outputs, self_attn_weights, present_key_value = self.self_attn(
883
+ hidden_states=hidden_states,
884
+ attention_mask=attention_mask,
885
+ position_ids=position_ids,
886
+ past_key_value=past_key_value,
887
+ output_attentions=output_attentions,
888
+ use_cache=use_cache,
889
+ )
890
+
891
+ hidden_states = residual + self.resid_attn_dropout(attn_outputs)
892
+
893
+ residual = hidden_states
894
+ hidden_states = self.post_attention_layernorm(hidden_states)
895
+ hidden_states = self.mlp(hidden_states)
896
+ hidden_states = residual + self.resid_mlp_dropout(hidden_states)
897
+
898
+ outputs = (hidden_states,)
899
+
900
+ if output_attentions:
901
+ outputs += (self_attn_weights,)
902
+
903
+ if use_cache:
904
+ outputs += (present_key_value,)
905
+
906
+ return outputs
907
+
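Phi3DecoderLayer above uses a pre-norm residual layout: each sub-block reads a normalized copy of the hidden stream, and its dropout-regularized output is added back onto the unnormalized residual. A control-flow sketch with Linear stand-ins replacing the real attention and MLP blocks:

import torch
from torch import nn

hidden_size = 8
input_layernorm = nn.LayerNorm(hidden_size)               # stand-in for Phi3RMSNorm
post_attention_layernorm = nn.LayerNorm(hidden_size)
self_attn = nn.Linear(hidden_size, hidden_size)            # stand-in for Phi3Attention
mlp = nn.Linear(hidden_size, hidden_size)                  # stand-in for Phi3MLP
resid_dropout = nn.Dropout(0.0)

x = torch.randn(1, 4, hidden_size)
x = x + resid_dropout(self_attn(input_layernorm(x)))       # attention sub-block
x = x + resid_dropout(mlp(post_attention_layernorm(x)))    # MLP sub-block
print(x.shape)  # torch.Size([1, 4, 8])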
908
+
909
+ PHI3_START_DOCSTRING = r"""
910
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
911
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
912
+ etc.)
913
+
914
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
915
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
916
+ and behavior.
917
+
918
+ Parameters:
919
+ config ([`Phi3Config`]):
920
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
921
+ load the weights associated with the model, only the configuration. Check out the
922
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
923
+ """
924
+
925
+
926
+ @add_start_docstrings(
927
+ 'The bare Phi-3 model outputting raw hidden-states without any specific head on top.',
928
+ PHI3_START_DOCSTRING,
929
+ )
930
+ class Phi3PreTrainedModel(PreTrainedModel):
931
+ config_class = Phi3Config
932
+ base_model_prefix = 'model'
933
+ supports_gradient_checkpointing = True
934
+ _no_split_modules = ['Phi3DecoderLayer']
935
+ _skip_keys_device_placement = 'past_key_values'
936
+ _supports_flash_attn_2 = True
937
+ _supports_sdpa = False
938
+ _supports_cache_class = True
939
+
940
+ _version = '0.0.5'
941
+
942
+ def __init__(self, config: Phi3Config):
943
+ if not has_flash_attn:
944
+ config._attn_implementation = 'eager'
945
+ print('Warning: Flash attention is not available, using eager attention instead.')
946
+ super().__init__(config)
947
+
948
+ def _init_weights(self, module):
949
+ std = self.config.initializer_range
950
+ if isinstance(module, nn.Linear):
951
+ module.weight.data.normal_(mean=0.0, std=std)
952
+ if module.bias is not None:
953
+ module.bias.data.zero_()
954
+ elif isinstance(module, nn.Embedding):
955
+ module.weight.data.normal_(mean=0.0, std=std)
956
+ if module.padding_idx is not None:
957
+ module.weight.data[module.padding_idx].zero_()
958
+
959
+
960
+ PHI3_INPUTS_DOCSTRING = r"""
961
+ Args:
962
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
963
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
964
+ it.
965
+
966
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
967
+ [`PreTrainedTokenizer.__call__`] for details.
968
+
969
+ [What are input IDs?](../glossary#input-ids)
970
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
971
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
972
+
973
+ - 1 for tokens that are **not masked**,
974
+ - 0 for tokens that are **masked**.
975
+
976
+ [What are attention masks?](../glossary#attention-mask)
977
+
978
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
979
+ [`PreTrainedTokenizer.__call__`] for details.
980
+
981
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
982
+ `past_key_values`).
983
+
984
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
985
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
986
+ information on the default strategy.
987
+
988
+ - 1 indicates the head is **not masked**,
989
+ - 0 indicates the head is **masked**.
990
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
991
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
992
+ config.n_positions - 1]`.
993
+
994
+ [What are position IDs?](../glossary#position-ids)
995
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
996
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
997
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
998
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
999
+
1000
+ Two formats are allowed:
1001
+ - a [`~cache_utils.Cache`] instance;
1002
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1003
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1004
+ cache format.
1005
+
1006
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1007
+ legacy cache format will be returned.
1008
+
1009
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1010
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1011
+ of shape `(batch_size, sequence_length)`.
1012
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1013
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1014
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1015
+ model's internal embedding lookup matrix.
1016
+ use_cache (`bool`, *optional*):
1017
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1018
+ `past_key_values`).
1019
+ output_attentions (`bool`, *optional*):
1020
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1021
+ tensors for more detail.
1022
+ output_hidden_states (`bool`, *optional*):
1023
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1024
+ more detail.
1025
+ return_dict (`bool`, *optional*):
1026
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1027
+ """
1028
+
1029
+
1030
+ @add_start_docstrings(
1031
+ 'The bare Phi-3 model outputting raw hidden-states without any specific head on top.',
1032
+ PHI3_START_DOCSTRING,
1033
+ )
1034
+ class Phi3Model(Phi3PreTrainedModel):
1035
+ """
1036
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]
1037
+
1038
+ Args:
1039
+ config: Phi3Config
1040
+ """
1041
+
1042
+ def __init__(self, config: Phi3Config):
1043
+ super().__init__(config)
1044
+ self.padding_idx = config.pad_token_id
1045
+ self.vocab_size = config.vocab_size
1046
+
1047
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1048
+ self.embed_dropout = nn.Dropout(config.embd_pdrop)
1049
+ self.layers = nn.ModuleList(
1050
+ [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
1051
+ )
1052
+ self._attn_implementation = config._attn_implementation
1053
+
1054
+ self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1055
+
1056
+ self.gradient_checkpointing = False
1057
+ # Initialize weights and apply final processing
1058
+ self.post_init()
1059
+
1060
+ def get_input_embeddings(self):
1061
+ return self.embed_tokens
1062
+
1063
+ def set_input_embeddings(self, value):
1064
+ self.embed_tokens = value
1065
+
1066
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1067
+ def forward(
1068
+ self,
1069
+ input_ids: torch.LongTensor = None,
1070
+ attention_mask: Optional[torch.Tensor] = None,
1071
+ position_ids: Optional[torch.LongTensor] = None,
1072
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1073
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1074
+ use_cache: Optional[bool] = None,
1075
+ output_attentions: Optional[bool] = None,
1076
+ output_hidden_states: Optional[bool] = None,
1077
+ return_dict: Optional[bool] = None,
1078
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
1079
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1080
+ output_hidden_states = (
1081
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1082
+ )
1083
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1084
+
1085
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1086
+
1087
+ # retrieve input_ids and inputs_embeds
1088
+ if input_ids is not None and inputs_embeds is not None:
1089
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
1090
+ elif input_ids is not None:
1091
+ batch_size, seq_length = input_ids.shape[:2]
1092
+ elif inputs_embeds is not None:
1093
+ batch_size, seq_length = inputs_embeds.shape[:2]
1094
+ else:
1095
+ raise ValueError('You have to specify either input_ids or inputs_embeds')
1096
+
1097
+ past_key_values_length = 0
1098
+
1099
+ if self.gradient_checkpointing and self.training:
1100
+ if use_cache:
1101
+ logger.warning_once(
1102
+ '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
1103
+ )
1104
+ use_cache = False
1105
+
1106
+ if use_cache:
1107
+ use_legacy_cache = not isinstance(past_key_values, Cache)
1108
+ if use_legacy_cache:
1109
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1110
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
1111
+
1112
+ if position_ids is None:
1113
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1114
+ position_ids = torch.arange(
1115
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1116
+ )
1117
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
1118
+ else:
1119
+ position_ids = position_ids.view(-1, seq_length).long()
1120
+
1121
+ if inputs_embeds is None:
1122
+ inputs_embeds = self.embed_tokens(input_ids)
1123
+
1124
+ if attention_mask is not None and self._attn_implementation == 'flash_attention_2' and use_cache:
1125
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
1126
+ if is_padding_right:
1127
+ raise ValueError(
1128
+ "You are attempting to perform batched generation with padding_side='right'"
1129
+ ' this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to '
1130
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1131
+ )
1132
+
1133
+ if self._attn_implementation == 'flash_attention_2':
1134
+ # 2d mask is passed through the layers
1135
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1136
+ else:
1137
+ # 4d mask is passed through the layers
1138
+ attention_mask = _prepare_4d_causal_attention_mask(
1139
+ attention_mask,
1140
+ (batch_size, seq_length),
1141
+ inputs_embeds,
1142
+ past_key_values_length,
1143
+ sliding_window=self.config.sliding_window,
1144
+ )
1145
+
1146
+ hidden_states = inputs_embeds
1147
+
1148
+ # decoder layers
1149
+ all_hidden_states = () if output_hidden_states else None
1150
+ all_self_attns = () if output_attentions else None
1151
+ next_decoder_cache = None
1152
+
1153
+ for decoder_layer in self.layers:
1154
+ if output_hidden_states:
1155
+ all_hidden_states += (hidden_states,)
1156
+
1157
+ if self.gradient_checkpointing and self.training:
1158
+ layer_outputs = self._gradient_checkpointing_func(
1159
+ decoder_layer.__call__,
1160
+ hidden_states,
1161
+ attention_mask,
1162
+ position_ids,
1163
+ past_key_values,
1164
+ output_attentions,
1165
+ use_cache,
1166
+ )
1167
+ else:
1168
+ layer_outputs = decoder_layer(
1169
+ hidden_states,
1170
+ attention_mask=attention_mask,
1171
+ position_ids=position_ids,
1172
+ past_key_value=past_key_values,
1173
+ output_attentions=output_attentions,
1174
+ use_cache=use_cache,
1175
+ )
1176
+
1177
+ hidden_states = layer_outputs[0]
1178
+
1179
+ if use_cache:
1180
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1181
+
1182
+ if output_attentions:
1183
+ all_self_attns += (layer_outputs[1],)
1184
+
1185
+ hidden_states = self.norm(hidden_states)
1186
+
1187
+ # add hidden states from the last decoder layer
1188
+ if output_hidden_states:
1189
+ all_hidden_states += (hidden_states,)
1190
+
1191
+ next_cache = None
1192
+ if use_cache:
1193
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1194
+ if not return_dict:
1195
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1196
+ return BaseModelOutputWithPast(
1197
+ last_hidden_state=hidden_states,
1198
+ past_key_values=next_cache,
1199
+ hidden_states=all_hidden_states,
1200
+ attentions=all_self_attns,
1201
+ )
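Because `Phi3Model.forward` accepts `inputs_embeds` in place of `input_ids`, callers can edit token embeddings before the decoder stack runs, which is how the multimodal wrapper later in this upload splices image features into the text stream. A minimal sketch, where `model`, `input_ids` and `attention_mask` are assumed to already exist:

embeds = model.get_input_embeddings()(input_ids)            # (batch, seq, hidden)
# ... overwrite selected positions of `embeds` with externally computed vectors ...
out = model(inputs_embeds=embeds, attention_mask=attention_mask, return_dict=True)
hidden = out.last_hidden_state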
1202
+
1203
+
1204
+ class Phi3ForCausalLM(Phi3PreTrainedModel):
1205
+ _tied_weights_keys = ['lm_head.weight']
1206
+
1207
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3
1208
+ def __init__(self, config):
1209
+ super().__init__(config)
1210
+ self.model = Phi3Model(config)
1211
+ self.vocab_size = config.vocab_size
1212
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1213
+
1214
+ # Initialize weights and apply final processing
1215
+ self.post_init()
1216
+
1217
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
1218
+ def get_input_embeddings(self):
1219
+ return self.model.embed_tokens
1220
+
1221
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
1222
+ def set_input_embeddings(self, value):
1223
+ self.model.embed_tokens = value
1224
+
1225
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
1226
+ def get_output_embeddings(self):
1227
+ return self.lm_head
1228
+
1229
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
1230
+ def set_output_embeddings(self, new_embeddings):
1231
+ self.lm_head = new_embeddings
1232
+
1233
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
1234
+ def set_decoder(self, decoder):
1235
+ self.model = decoder
1236
+
1237
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
1238
+ def get_decoder(self):
1239
+ return self.model
1240
+
1241
+ # Ignore copy
1242
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1243
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1244
+ def forward(
1245
+ self,
1246
+ input_ids: torch.LongTensor = None,
1247
+ attention_mask: Optional[torch.Tensor] = None,
1248
+ position_ids: Optional[torch.LongTensor] = None,
1249
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1250
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1251
+ labels: Optional[torch.LongTensor] = None,
1252
+ use_cache: Optional[bool] = None,
1253
+ output_attentions: Optional[bool] = None,
1254
+ output_hidden_states: Optional[bool] = None,
1255
+ return_dict: Optional[bool] = None,
1256
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1257
+ r"""
1258
+ Args:
1259
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1260
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
1261
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1262
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1263
+
1264
+ Returns:
1265
+
1266
+ Example:
1267
+
1268
+ ```python
1269
+ >>> from transformers import AutoTokenizer, Phi3ForCausalLM
1270
+
1271
+ >>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1272
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1273
+
1274
+ >>> prompt = "This is an example script ."
1275
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1276
+
1277
+ >>> # Generate
1278
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1279
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1280
+ 'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
1281
+ ```"""
1282
+
1283
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1284
+ output_hidden_states = (
1285
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1286
+ )
1287
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1288
+
1289
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1290
+ outputs = self.model(
1291
+ input_ids=input_ids,
1292
+ attention_mask=attention_mask,
1293
+ position_ids=position_ids,
1294
+ past_key_values=past_key_values,
1295
+ inputs_embeds=inputs_embeds,
1296
+ use_cache=use_cache,
1297
+ output_attentions=output_attentions,
1298
+ output_hidden_states=output_hidden_states,
1299
+ return_dict=return_dict,
1300
+ )
1301
+
1302
+ hidden_states = outputs[0]
1303
+ logits = self.lm_head(hidden_states)
1304
+ logits = logits.float()
1305
+
1306
+ loss = None
1307
+ if labels is not None:
1308
+ # Shift so that tokens < n predict n
1309
+ shift_logits = logits[..., :-1, :].contiguous()
1310
+ shift_labels = labels[..., 1:].contiguous()
1311
+ # Flatten the tokens
1312
+ loss_fct = CrossEntropyLoss()
1313
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1314
+ shift_labels = shift_labels.view(-1)
1315
+ # Enable model parallelism
1316
+ shift_labels = shift_labels.to(shift_logits.device)
1317
+ loss = loss_fct(shift_logits, shift_labels)
1318
+
1319
+ if not return_dict:
1320
+ output = (logits,) + outputs[1:]
1321
+ return (loss,) + output if loss is not None else output
1322
+
1323
+ return CausalLMOutputWithPast(
1324
+ loss=loss,
1325
+ logits=logits,
1326
+ past_key_values=outputs.past_key_values,
1327
+ hidden_states=outputs.hidden_states,
1328
+ attentions=outputs.attentions,
1329
+ )
1330
+
1331
+ # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
1332
+ def prepare_inputs_for_generation(
1333
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1334
+ ):
1335
+ if past_key_values is not None:
1336
+ if isinstance(past_key_values, Cache):
1337
+ cache_length = past_key_values.get_seq_length()
1338
+ past_length = past_key_values.seen_tokens
1339
+ max_cache_length = past_key_values.get_max_length()
1340
+ else:
1341
+ cache_length = past_length = past_key_values[0][0].shape[2]
1342
+ max_cache_length = None
1343
+
1344
+ # Keep only the unprocessed tokens:
1345
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1346
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1347
+ # input)
1348
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1349
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1350
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1351
+ # input_ids based on the past_length.
1352
+ elif past_length < input_ids.shape[1]:
1353
+ input_ids = input_ids[:, past_length:]
1354
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1355
+
1356
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1357
+ if (
1358
+ max_cache_length is not None
1359
+ and attention_mask is not None
1360
+ and cache_length + input_ids.shape[1] > max_cache_length
1361
+ ):
1362
+ attention_mask = attention_mask[:, -max_cache_length:]
1363
+
1364
+ position_ids = kwargs.get('position_ids', None)
1365
+ if attention_mask is not None and position_ids is None:
1366
+ # create position_ids on the fly for batch generation
1367
+ position_ids = attention_mask.long().cumsum(-1) - 1
1368
+ position_ids.masked_fill_(attention_mask == 0, 1)
1369
+ if past_key_values:
1370
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1371
+
1372
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1373
+ if inputs_embeds is not None and (past_key_values is None or len(past_key_values) == 0):
1374
+ model_inputs = {'inputs_embeds': inputs_embeds}
1375
+ else:
1376
+ model_inputs = {'input_ids': input_ids}
1377
+
1378
+ model_inputs.update(
1379
+ {
1380
+ 'position_ids': position_ids,
1381
+ 'past_key_values': past_key_values,
1382
+ 'use_cache': kwargs.get('use_cache'),
1383
+ 'attention_mask': attention_mask,
1384
+ }
1385
+ )
1386
+ return model_inputs
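A toy trace of the token-cropping rules above (numbers invented for illustration): with 10 tokens already in the cache and `input_ids` of length 12, rule 2 applies and only the last 2 tokens are forwarded; if `attention_mask` had 15 columns instead, rule 1 would keep the last 15 - 10 = 5 columns of `input_ids`.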
1387
+
1388
+ @staticmethod
1389
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
1390
+ def _reorder_cache(past_key_values, beam_idx):
1391
+ reordered_past = ()
1392
+ for layer_past in past_key_values:
1393
+ reordered_past += (
1394
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1395
+ )
1396
+ return reordered_past
1397
+
1398
+
1399
+ @add_start_docstrings(
1400
+ """
1401
+ The [`Phi3Model`] with a sequence classification head on top (linear layer).
1402
+
1403
+ [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1404
+ (e.g. GPT-2) do.
1405
+
1406
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1407
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1408
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1409
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1410
+ each row of the batch).
1411
+ """,
1412
+ PHI3_START_DOCSTRING,
1413
+ )
1414
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phi3, LLAMA->PHI3, self.transformer->self.model, transformer_outputs->model_outputs
1415
+ class Phi3ForSequenceClassification(Phi3PreTrainedModel):
1416
+ def __init__(self, config):
1417
+ super().__init__(config)
1418
+ self.num_labels = config.num_labels
1419
+ self.model = Phi3Model(config)
1420
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1421
+
1422
+ # Initialize weights and apply final processing
1423
+ self.post_init()
1424
+
1425
+ def get_input_embeddings(self):
1426
+ return self.model.embed_tokens
1427
+
1428
+ def set_input_embeddings(self, value):
1429
+ self.model.embed_tokens = value
1430
+
1431
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1432
+ def forward(
1433
+ self,
1434
+ input_ids: torch.LongTensor = None,
1435
+ attention_mask: Optional[torch.Tensor] = None,
1436
+ position_ids: Optional[torch.LongTensor] = None,
1437
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1438
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1439
+ labels: Optional[torch.LongTensor] = None,
1440
+ use_cache: Optional[bool] = None,
1441
+ output_attentions: Optional[bool] = None,
1442
+ output_hidden_states: Optional[bool] = None,
1443
+ return_dict: Optional[bool] = None,
1444
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1445
+ r"""
1446
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1447
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1448
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1449
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1450
+ """
1451
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1452
+
1453
+ model_outputs = self.model(
1454
+ input_ids,
1455
+ attention_mask=attention_mask,
1456
+ position_ids=position_ids,
1457
+ past_key_values=past_key_values,
1458
+ inputs_embeds=inputs_embeds,
1459
+ use_cache=use_cache,
1460
+ output_attentions=output_attentions,
1461
+ output_hidden_states=output_hidden_states,
1462
+ return_dict=return_dict,
1463
+ )
1464
+ hidden_states = model_outputs[0]
1465
+ logits = self.score(hidden_states)
1466
+
1467
+ if input_ids is not None:
1468
+ batch_size = input_ids.shape[0]
1469
+ else:
1470
+ batch_size = inputs_embeds.shape[0]
1471
+
1472
+ if self.config.pad_token_id is None and batch_size != 1:
1473
+ raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
1474
+ if self.config.pad_token_id is None:
1475
+ sequence_lengths = -1
1476
+ else:
1477
+ if input_ids is not None:
1478
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1479
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1480
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1481
+ sequence_lengths = sequence_lengths.to(logits.device)
1482
+ else:
1483
+ sequence_lengths = -1
1484
+
1485
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1486
+
1487
+ loss = None
1488
+ if labels is not None:
1489
+ labels = labels.to(logits.device)
1490
+ if self.config.problem_type is None:
1491
+ if self.num_labels == 1:
1492
+ self.config.problem_type = 'regression'
1493
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1494
+ self.config.problem_type = 'single_label_classification'
1495
+ else:
1496
+ self.config.problem_type = 'multi_label_classification'
1497
+
1498
+ if self.config.problem_type == 'regression':
1499
+ loss_fct = MSELoss()
1500
+ if self.num_labels == 1:
1501
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1502
+ else:
1503
+ loss = loss_fct(pooled_logits, labels)
1504
+ elif self.config.problem_type == 'single_label_classification':
1505
+ loss_fct = CrossEntropyLoss()
1506
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1507
+ elif self.config.problem_type == 'multi_label_classification':
1508
+ loss_fct = BCEWithLogitsLoss()
1509
+ loss = loss_fct(pooled_logits, labels)
1510
+ if not return_dict:
1511
+ output = (pooled_logits,) + model_outputs[1:]
1512
+ return ((loss,) + output) if loss is not None else output
1513
+
1514
+ return SequenceClassifierOutputWithPast(
1515
+ loss=loss,
1516
+ logits=pooled_logits,
1517
+ past_key_values=model_outputs.past_key_values,
1518
+ hidden_states=model_outputs.hidden_states,
1519
+ attentions=model_outputs.attentions,
1520
+ )
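A worked sketch of the last-token pooling described in the class docstring above; the ids and `pad_token_id = 0` are made up for illustration:

import torch

input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 0, 0, 0]])                 # 0 is the assumed pad id
last_idx = (torch.eq(input_ids, 0).int().argmax(-1) - 1) % input_ids.shape[-1]
# last_idx == tensor([2, 1]): the position of the last real token in each row,
# whose hidden state is the one scored by `self.score` above.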
1521
+
1522
+
1523
+ @add_start_docstrings(
1524
+ """
1525
+ [`Phi3Model`] with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1526
+ Named-Entity-Recognition (NER) tasks.
1527
+ """,
1528
+ PHI3_START_DOCSTRING,
1529
+ )
1530
+ # Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with Mpt->Phi3,MPT->PHI3,self.transformer->self.model,transformer_outputs->model_outputs
1531
+ class Phi3ForTokenClassification(Phi3PreTrainedModel):
1532
+ def __init__(self, config: Phi3Config):
1533
+ super().__init__(config)
1534
+ self.num_labels = config.num_labels
1535
+
1536
+ self.model = Phi3Model(config)
1537
+ if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None:
1538
+ classifier_dropout = config.classifier_dropout
1539
+ elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None:
1540
+ classifier_dropout = config.hidden_dropout
1541
+ else:
1542
+ classifier_dropout = 0.1
1543
+ self.dropout = nn.Dropout(classifier_dropout)
1544
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1545
+
1546
+ # Initialize weights and apply final processing
1547
+ self.post_init()
1548
+
1549
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1550
+ @add_code_sample_docstrings(
1551
+ checkpoint=_CHECKPOINT_FOR_DOC,
1552
+ output_type=TokenClassifierOutput,
1553
+ config_class=_CONFIG_FOR_DOC,
1554
+ )
1555
+ def forward(
1556
+ self,
1557
+ input_ids: Optional[torch.LongTensor] = None,
1558
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1559
+ attention_mask: Optional[torch.Tensor] = None,
1560
+ inputs_embeds: Optional[torch.Tensor] = None,
1561
+ labels: Optional[torch.Tensor] = None,
1562
+ use_cache: Optional[bool] = None,
1563
+ output_attentions: Optional[bool] = None,
1564
+ output_hidden_states: Optional[bool] = None,
1565
+ return_dict: Optional[bool] = None,
1566
+ **deprecated_arguments,
1567
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1568
+ r"""
1569
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1570
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1571
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1572
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1573
+ """
1574
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1575
+
1576
+ model_outputs = self.model(
1577
+ input_ids,
1578
+ past_key_values=past_key_values,
1579
+ attention_mask=attention_mask,
1580
+ inputs_embeds=inputs_embeds,
1581
+ use_cache=use_cache,
1582
+ output_attentions=output_attentions,
1583
+ output_hidden_states=output_hidden_states,
1584
+ return_dict=return_dict,
1585
+ )
1586
+
1587
+ hidden_states = model_outputs[0]
1588
+ hidden_states = self.dropout(hidden_states)
1589
+ logits = self.classifier(hidden_states)
1590
+
1591
+ loss = None
1592
+ if labels is not None:
1593
+ # move labels to correct device to enable model parallelism
1594
+ labels = labels.to(logits.device)
1595
+ batch_size, seq_length = labels.shape
1596
+ loss_fct = CrossEntropyLoss()
1597
+ loss = loss_fct(
1598
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
1599
+ )
1600
+
1601
+ if not return_dict:
1602
+ output = (logits,) + model_outputs[2:]
1603
+ return ((loss,) + output) if loss is not None else output
1604
+
1605
+ return TokenClassifierOutput(
1606
+ loss=loss,
1607
+ logits=logits,
1608
+ hidden_states=model_outputs.hidden_states,
1609
+ attentions=model_outputs.attentions,
1610
+ )
modeling_sa2va_chat.py ADDED
@@ -0,0 +1,833 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import warnings
8
+ from typing import Any, List, Optional, Tuple, Union
9
+
10
+ import torchvision.transforms as T
11
+ from torchvision.transforms.functional import InterpolationMode
12
+
13
+ import torch.utils.checkpoint
14
+ import transformers
15
+
16
+ from .modeling_internlm2 import InternLM2ForCausalLM
17
+ from .modeling_phi3 import Phi3ForCausalLM
18
+ from peft import LoraConfig, get_peft_model
19
+ from torch import nn
20
+ from torch.nn import CrossEntropyLoss
21
+ from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
22
+ LlamaTokenizer, Qwen2ForCausalLM)
23
+ from transformers.modeling_outputs import CausalLMOutputWithPast
24
+ from transformers.modeling_utils import PreTrainedModel
25
+ from transformers.utils import ModelOutput, logging
26
+ from transformers import StoppingCriteriaList, StoppingCriteria
27
+
28
+ from .configuration_sa2va_chat import Sa2VAChatConfig
29
+ from .modeling_intern_vit import InternVisionModel, has_flash_attn
30
+
31
+ from .sam2 import SAM2
32
+ from .templates import PROMPT_TEMPLATE
33
+
34
+ import numpy as np
35
+ from torchvision.transforms.functional import resize, to_pil_image
36
+
37
+ from types import MethodType
38
+ import torch.nn.functional as F
39
+
40
+ try:
41
+ from .flash_attention import FlashAttention
42
+ has_flash_attn = True
43
+ except Exception:
44
+ print('FlashAttention is not installed.')
45
+ has_flash_attn = False
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ def version_cmp(v1, v2, op='eq'):
50
+ import operator
51
+
52
+ from packaging import version
53
+ op_func = getattr(operator, op)
54
+ return op_func(version.parse(v1), version.parse(v2))
55
+
56
+ class StopWordStoppingCriteria(StoppingCriteria):
57
+ """StopWord stopping criteria."""
58
+
59
+ def __init__(self, tokenizer, stop_word):
60
+ self.tokenizer = tokenizer
61
+ self.stop_word = stop_word
62
+ self.length = len(self.stop_word)
63
+
64
+ def __call__(self, input_ids, *args, **kwargs) -> bool:
65
+ cur_text = self.tokenizer.decode(input_ids[0])
66
+ cur_text = cur_text.replace('\r', '').replace('\n', '')
67
+ return cur_text[-self.length:] == self.stop_word
68
+
69
+ def get_stop_criteria(
70
+ tokenizer,
71
+ stop_words=[],
72
+ ):
73
+ stop_criteria = StoppingCriteriaList()
74
+ for word in stop_words:
75
+ stop_criteria.append(StopWordStoppingCriteria(tokenizer, word))
76
+ return stop_criteria
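A minimal usage sketch for the stop-word helper above; `tokenizer` and `model` are assumed to be a loaded tokenizer and causal language model, and the stop word is chosen only for illustration:

stop_criteria = get_stop_criteria(tokenizer=tokenizer, stop_words=['<|end|>'])
outputs = model.generate(input_ids, max_new_tokens=64, stopping_criteria=stop_criteria)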
77
+
78
+ class DirectResize:
79
+ def __init__(self, target_length: int) -> None:
80
+ self.target_length = target_length
81
+
82
+ def apply_image(self, image: np.ndarray) -> np.ndarray:
83
+ """
84
+ Expects a numpy array with shape HxWxC in uint8 format.
85
+ """
86
+ img = to_pil_image(image, mode='RGB')
87
+ return np.array(img.resize((self.target_length, self.target_length)))
88
+
89
+ class Sa2VAChatModel(PreTrainedModel):
90
+ config_class = Sa2VAChatConfig
91
+ main_input_name = 'pixel_values'
92
+ base_model_prefix = 'language_model'
93
+ _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'InternLM2DecoderLayer',
94
+ 'Phi3DecoderLayer', 'Qwen2DecoderLayer', 'SAM2']
95
+ _supports_flash_attn_2 = True
96
+ supports_gradient_checkpointing = True
97
+
98
+ def __init__(self, config: Sa2VAChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
99
+ super().__init__(config)
100
+
101
+ assert version_cmp(transformers.__version__, '4.37.0', 'ge')
102
+ image_size = config.force_image_size or config.vision_config.image_size
103
+ patch_size = config.vision_config.patch_size
104
+ self.patch_size = patch_size
105
+ self.select_layer = config.select_layer
106
+ self.template = config.template
107
+ self.template = self.template.replace('-', '_')
108
+ self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
109
+ self.downsample_ratio = config.downsample_ratio
110
+ self.ps_version = config.ps_version
111
+ self.llm_arch_name = config.llm_config.architectures[0]
112
+
113
+ use_flash_attn = use_flash_attn if has_flash_attn else False
114
+ config.vision_config.use_flash_attn = True if use_flash_attn else False
115
+ config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
116
+
117
+ logger.info(f'num_image_token: {self.num_image_token}')
118
+ logger.info(f'ps_version: {self.ps_version}')
119
+ if vision_model is not None:
120
+ self.vision_model = vision_model
121
+ else:
122
+ self.vision_model = InternVisionModel(config.vision_config)
123
+ if language_model is not None:
124
+ self.language_model = language_model
125
+ else:
126
+ if config.llm_config.architectures[0] == 'LlamaForCausalLM':
127
+ self.language_model = LlamaForCausalLM(config.llm_config)
128
+ elif config.llm_config.architectures[0] == 'InternLM2ForCausalLM':
129
+ self.language_model = InternLM2ForCausalLM(config.llm_config)
130
+ elif config.llm_config.architectures[0] == 'Phi3ForCausalLM':
131
+ self.language_model = Phi3ForCausalLM(config.llm_config)
132
+ elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
133
+ self.language_model = Qwen2ForCausalLM(config.llm_config)
134
+ else:
135
+ raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
136
+
137
+ vit_hidden_size = config.vision_config.hidden_size
138
+ llm_hidden_size = config.llm_config.hidden_size
139
+
140
+ self.mlp1 = nn.Sequential(
141
+ nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
142
+ nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
143
+ nn.GELU(),
144
+ nn.Linear(llm_hidden_size, llm_hidden_size)
145
+ )
146
+
147
+ self.img_context_token_id = None
148
+ self.conv_template = PROMPT_TEMPLATE[self.template]
149
+ self.template = self.conv_template
150
+ if hasattr(config, 'system_message'):
151
+ self.system_message = config.system_message
152
+ self.num_samples = 0
153
+
154
+ if config.use_backbone_lora:
155
+ self.wrap_backbone_lora(r=config.use_backbone_lora, lora_alpha=2 * config.use_backbone_lora)
156
+
157
+ if config.use_llm_lora:
158
+ self.wrap_llm_lora(r=config.use_llm_lora, lora_alpha=2 * config.use_llm_lora)
159
+
160
+ self.grounding_encoder = SAM2()
161
+ out_dim = self.grounding_encoder.hidden_dim
162
+ in_dim = llm_hidden_size
163
+ self.text_hidden_fcs = nn.Sequential(
164
+ nn.Linear(in_dim, in_dim), nn.ReLU(inplace=True),
165
+ nn.Linear(in_dim, out_dim), nn.Dropout(0.0)
166
+ )
167
+
168
+ self.init_prediction_config = False
169
+
170
+ def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
171
+ lora_config = LoraConfig(
172
+ r=r,
173
+ target_modules=['attn.qkv', 'attn.proj', 'mlp.fc1', 'mlp.fc2'],
174
+ lora_alpha=lora_alpha,
175
+ lora_dropout=lora_dropout,
176
+ )
177
+ self.vision_model = get_peft_model(self.vision_model, lora_config)
178
+ self.vision_model.print_trainable_parameters()
179
+
180
+ def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
181
+ # Determine the target modules based on the architecture of the language model
182
+ if self.llm_arch_name == 'InternLM2ForCausalLM':
183
+ target_modules = ['attention.wqkv', 'attention.wo', 'feed_forward.w1', 'feed_forward.w2', 'feed_forward.w3']
184
+ elif self.llm_arch_name == 'Phi3ForCausalLM':
185
+ target_modules = ['mlp.down_proj', 'mlp.gate_up_proj', 'self_attn.o_proj', 'self_attn.qkv_proj']
186
+ elif self.llm_arch_name in ['Qwen2ForCausalLM', 'LlamaForCausalLM']:
187
+ target_modules = ['self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj',
188
+ 'mlp.gate_proj', 'mlp.down_proj', 'mlp.up_proj']
189
+ else:
190
+ raise NotImplementedError(f'{self.llm_arch_name} is not implemented.')
191
+ lora_config = LoraConfig(
192
+ r=r,
193
+ target_modules=target_modules,
194
+ lora_alpha=lora_alpha,
195
+ lora_dropout=lora_dropout,
196
+ task_type='CAUSAL_LM'
197
+ )
198
+ self.language_model = get_peft_model(self.language_model, lora_config)
199
+ self.language_model.enable_input_require_grads()
200
+ self.language_model.print_trainable_parameters()
201
+
202
+ def pixel_shuffle(self, x, scale_factor=0.5):
203
+ n, w, h, c = x.size()
204
+ # N, W, H, C --> N, W, H * scale, C // scale
205
+ x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
206
+ # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
207
+ x = x.permute(0, 2, 1, 3).contiguous()
208
+ # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
209
+ x = x.view(n, int(h * scale_factor), int(w * scale_factor),
210
+ int(c / (scale_factor * scale_factor)))
211
+ if self.ps_version == 'v1':
212
+ warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
213
+ 'which results in a transposed image.')
214
+ else:
215
+ x = x.permute(0, 2, 1, 3).contiguous()
216
+ return x
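A quick shape check for the pixel shuffle above, with toy tensor sizes (assumed) and `scale_factor = 0.5`; the call on `model` is hypothetical:

import torch

x = torch.randn(2, 32, 32, 1024)                 # (N, W, H, C) grid of ViT tokens
y = model.pixel_shuffle(x, scale_factor=0.5)
# y.shape == (2, 16, 16, 4096): 4x fewer spatial tokens and 4x more channels (C * 4),
# i.e. vit_hidden_size * int(1 / downsample_ratio) ** 2, the input width self.mlp1 expects.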
217
+
218
+ def extract_feature(self, pixel_values):
219
+ if self.select_layer == -1:
220
+ vit_embeds = self.vision_model(
221
+ pixel_values=pixel_values,
222
+ output_hidden_states=False,
223
+ return_dict=True).last_hidden_state
224
+ else:
225
+ vit_embeds = self.vision_model(
226
+ pixel_values=pixel_values,
227
+ output_hidden_states=True,
228
+ return_dict=True).hidden_states[self.select_layer]
229
+ vit_embeds = vit_embeds[:, 1:, :]
230
+
231
+ h = w = int(vit_embeds.shape[1] ** 0.5)
232
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
233
+ vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
234
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
235
+ vit_embeds = self.mlp1(vit_embeds)
236
+ return vit_embeds
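With the default settings (448-pixel tiles, patch size 14, `downsample_ratio = 0.5`), each tile yields (448 / 14)^2 = 1024 ViT tokens, and the pixel shuffle reduces these to 1024 * 0.5^2 = 256 language-model tokens per tile, matching `self.num_image_token` computed in `__init__`.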
237
+
238
+ @property
239
+ def lm_head(self):
240
+ return self.language_model.get_output_embeddings()
241
+
242
+ def get_input_embeddings(self):
243
+ return self.language_model.get_input_embeddings()
244
+
245
+ def get_output_embeddings(self):
246
+ return self.language_model.get_output_embeddings()
247
+
248
+ def forward(self, data, data_samples=None, mode='loss'):
249
+ pixel_values = data['pixel_values']
250
+
251
+ if type(pixel_values) is list or pixel_values.ndim == 5:
252
+ if type(pixel_values) is list:
253
+ pixel_values = [
254
+ x.unsqueeze(0) if x.ndim == 3 else x for x in pixel_values
255
+ ]
256
+ # b*n, c, h, w
257
+ concat_images = torch.cat(
258
+ [image.to(self.vision_model.dtype) for image in pixel_values], dim=0)
259
+ else:
260
+ raise NotImplementedError()
261
+
262
+ input_ids = data['input_ids']
263
+ position_ids = data['position_ids']
264
+ attention_mask = data['attention_mask']
265
+ # tiles whose pixel values sum to 0 are dummy (text-only) entries; real image tiles get flag 1
266
+ image_flags = torch.sum(concat_images, dim=(1, 2, 3)) != 0
267
+ image_flags = image_flags.long()
268
+
269
+ labels = data['labels']
270
+ use_cache = False
271
+
272
+ if 'vp_overall_mask' not in data.keys():
273
+ vp_overall_mask = None
274
+ else:
275
+ vp_overall_mask = data['vp_overall_mask']
276
+
277
+ if 'prompt_masks' in data.keys():
278
+ prompt_masks = data['prompt_masks']
279
+ else:
280
+ prompt_masks = None
281
+
282
+ outputs = self._llm_forward(
283
+ input_ids=input_ids,
284
+ position_ids=position_ids,
285
+ attention_mask=attention_mask,
286
+ image_flags=image_flags,
287
+ pixel_values=concat_images,
288
+ labels=labels,
289
+ use_cache=use_cache,
290
+ output_hidden_states=True,
291
+ vp_overall_mask=vp_overall_mask,
292
+ prompt_masks=prompt_masks,
293
+ )
294
+
295
+ return outputs
296
+
297
+ def _llm_forward(
298
+ self,
299
+ pixel_values: torch.FloatTensor,
300
+ input_ids: torch.LongTensor = None,
301
+ attention_mask: Optional[torch.Tensor] = None,
302
+ position_ids: Optional[torch.LongTensor] = None,
303
+ image_flags: Optional[torch.LongTensor] = None,
304
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
305
+ labels: Optional[torch.LongTensor] = None,
306
+ use_cache: Optional[bool] = None,
307
+ output_attentions: Optional[bool] = None,
308
+ output_hidden_states: Optional[bool] = None,
309
+ return_dict: Optional[bool] = None,
310
+ vp_overall_mask=None,
311
+ prompt_masks=None,
312
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
313
+ return_dict = return_dict if return_dict is not None \
314
+ else self.config.use_return_dict
315
+
316
+ image_flags = image_flags.squeeze(-1)
317
+ # Clone the embedding output so the in-place image-token assignment below does not modify a view of the embedding layer's output.
318
+ input_embeds = self.language_model.get_input_embeddings()(
319
+ input_ids).clone()
320
+
321
+ vit_embeds = self.extract_feature(pixel_values)
322
+ vit_embeds = vit_embeds.to(input_embeds.dtype) # FIXME: why vit_embeds is float16?
323
+ fast_vit_embeds = None
324
+
325
+ vit_embeds = vit_embeds[image_flags == 1]
326
+ vit_batch_size = pixel_values.shape[0]
327
+
328
+ B, N, C = input_embeds.shape
329
+ input_embeds = input_embeds.reshape(B * N, C)
330
+
331
+ self._count = getattr(self, '_count', 0) + 1
332
+
333
+ if vp_overall_mask is not None and prompt_masks is not None:
334
+ vp_embeds = []
335
+ vp_overall_mask = vp_overall_mask.to(vit_embeds.device).bool()
336
+ prompt_masks = [item.to(vit_embeds.device).bool() for item in prompt_masks]
337
+
338
+ vp_overall_mask = vp_overall_mask[image_flags == 1]
339
+ overall_tile_vit_embeds = vit_embeds[vp_overall_mask] # (n_img, hw, c)
340
+
341
+ i_vp_img = 0
342
+ for i_img in range(len(vit_embeds)):
343
+ vp_embeds.append(vit_embeds[i_img].reshape(-1, C))
344
+ if vp_overall_mask[i_img]:
345
+ tile_vit_embeds = overall_tile_vit_embeds[i_vp_img].reshape(-1, C) # (hw, C)
346
+ objects_prompt_masks = prompt_masks[i_vp_img]
347
+ n_obj = len(objects_prompt_masks)
348
+ tile_vit_embeds = tile_vit_embeds.unsqueeze(0).repeat(n_obj, 1, 1)
349
+ objects_prompt_masks = objects_prompt_masks.reshape(n_obj, -1)
350
+ vp_embeds.append(tile_vit_embeds[objects_prompt_masks])
351
+ i_vp_img += 1
352
+ vp_embeds = torch.cat(vp_embeds, dim=0)
353
+ else:
354
+ vp_embeds = None
355
+
356
+ input_ids = input_ids.reshape(B * N)
357
+ selected = (input_ids == self.img_context_token_id)
358
+
359
+ if vp_embeds is None:
360
+ try:
361
+ input_embeds[selected] = vit_embeds.reshape(-1, C)
362
+ except Exception as e:
363
+ vit_embeds = vit_embeds.reshape(-1, C)
364
+ print(f'warning: {e}, input_embeds[selected].shape='
365
+ f'{input_embeds[selected].shape}, '
366
+ f'vit_embeds.shape={vit_embeds.shape}')
367
+ n_token = selected.sum()
368
+ if n_token > len(vit_embeds):
369
+ print(f"Wrong !!! {n_token} image tokens in text but only {len(vit_embeds)} vit embeds !!!")
370
+ expand_ratio = n_token // len(vit_embeds) + 1
371
+ vit_embeds = torch.cat([vit_embeds] * expand_ratio, dim=0)
372
+
373
+ input_embeds[selected] = vit_embeds[:n_token]
374
+ else:
375
+ try:
376
+ input_embeds[selected] = vp_embeds.reshape(-1, C)
377
+ except Exception as e:
378
+ vp_embeds = vp_embeds.reshape(-1, C)
379
+ print(f'warning: {e}, input_embeds[selected].shape='
380
+ f'{input_embeds[selected].shape}, '
381
+ f'vp_embeds.shape={vp_embeds.shape}')
382
+ n_token = selected.sum()
383
+ if n_token > len(vp_embeds):
384
+ print(f"Wrong !!! {n_token} image tokens in text but only {len(vp_embeds)} vit embeds !!!")
385
+ expand_ratio = n_token // len(vp_embeds) + 1
386
+ vp_embeds = torch.cat([vp_embeds] * expand_ratio, dim=0)
387
+
388
+ input_embeds[selected] = vp_embeds[:n_token]
389
+
390
+ input_embeds = input_embeds.reshape(B, N, C)
391
+
392
+ outputs = self.language_model(
393
+ inputs_embeds=input_embeds,
394
+ attention_mask=attention_mask,
395
+ position_ids=position_ids,
396
+ past_key_values=past_key_values,
397
+ use_cache=use_cache,
398
+ output_attentions=output_attentions,
399
+ output_hidden_states=output_hidden_states,
400
+ return_dict=return_dict,
401
+ )
402
+ logits = outputs.logits
403
+
404
+ loss = None
405
+ if labels is not None:
406
+ # Shift so that tokens < n predict n
407
+ shift_logits = logits[..., :-1, :].contiguous()
408
+ shift_labels = labels[..., 1:].contiguous()
409
+ # Flatten the tokens
410
+ loss_fct = CrossEntropyLoss()
411
+ shift_logits = shift_logits.view(
412
+ -1, self.language_model.config.vocab_size)
413
+ shift_labels = shift_labels.view(-1)
414
+ # Enable model parallelism
415
+ shift_labels = shift_labels.to(shift_logits.device)
416
+ loss = loss_fct(shift_logits, shift_labels)
417
+
418
+ if not return_dict:
419
+ output = (logits,) + outputs[1:]
420
+ return (loss,) + output if loss is not None else output
421
+
422
+ return CausalLMOutputWithPast(
423
+ loss=loss,
424
+ logits=logits,
425
+ past_key_values=outputs.past_key_values,
426
+ hidden_states=outputs.hidden_states,
427
+ attentions=outputs.attentions,
428
+ )
429
+
430
+ @torch.no_grad()
431
+ def generate(
432
+ self,
433
+ pixel_values: Optional[torch.FloatTensor] = None,
434
+ input_ids: Optional[torch.FloatTensor] = None,
435
+ attention_mask: Optional[torch.LongTensor] = None,
436
+ visual_features: Optional[torch.FloatTensor] = None,
437
+ generation_config: Optional[GenerationConfig] = None,
438
+ output_hidden_states: Optional[bool] = None,
439
+ return_dict: Optional[bool] = None,
440
+ prompt_masks=None,
441
+ vp_overall_mask=None,
442
+ **generate_kwargs,
443
+ ) -> torch.LongTensor:
444
+ device = self.device
445
+ assert self.img_context_token_id is not None
446
+
447
+ if pixel_values is not None:
448
+ if visual_features is not None:
449
+ vit_embeds = visual_features
450
+ else:
451
+ if type(pixel_values) is list or pixel_values.ndim == 5:
452
+ if type(pixel_values) is list:
453
+ pixel_values = [
454
+ x.unsqueeze(0) if x.ndim == 3 else x for x in pixel_values
455
+ ]
456
+ # b*n, c, h, w
457
+ pixel_values = torch.cat(
458
+ [image.to(self.vision_model.dtype) for image in pixel_values], dim=0)
459
+
460
+ vit_embeds = self.extract_feature(pixel_values.to(device))
461
+ image_flags = torch.sum(pixel_values, dim=(1, 2, 3)) != 0
462
+ image_flags = image_flags.long()
463
+ vit_embeds = vit_embeds[image_flags == 1]
464
+
465
+ input_embeds = self.language_model.get_input_embeddings()(input_ids.to(device))
466
+ B, N, C = input_embeds.shape
467
+ input_embeds = input_embeds.reshape(B * N, C)
468
+
469
+ if vp_overall_mask is not None and prompt_masks is not None:
470
+ vp_embeds = []
471
+ vp_overall_mask = vp_overall_mask.to(vit_embeds.device).bool()
472
+ prompt_masks = [item.to(vit_embeds.device).bool() for item in prompt_masks]
473
+
474
+ vp_overall_mask = vp_overall_mask[image_flags == 1]
475
+ overall_tile_vit_embeds = vit_embeds[vp_overall_mask] # (n_img, hw, c)
476
+
477
+ i_vp_img = 0
478
+ for i_img in range(len(vit_embeds)):
479
+ vp_embeds.append(vit_embeds[i_img].reshape(-1, C))
480
+ if vp_overall_mask[i_img]:
481
+ tile_vit_embeds = overall_tile_vit_embeds[i_vp_img].reshape(-1, C) # (hw, C)
482
+ objects_prompt_masks = prompt_masks[i_vp_img]
483
+ n_obj = len(objects_prompt_masks)
484
+ tile_vit_embeds = tile_vit_embeds.unsqueeze(0).repeat(n_obj, 1, 1)
485
+ objects_prompt_masks = objects_prompt_masks.reshape(n_obj, -1)
486
+ vp_embeds.append(tile_vit_embeds[objects_prompt_masks])
487
+ i_vp_img += 1
488
+ vp_embeds = torch.cat(vp_embeds, dim=0)
489
+ else:
490
+ vp_embeds = None
491
+
492
+ input_ids = input_ids.reshape(B * N)
493
+ selected = (input_ids == self.img_context_token_id)
494
+ assert selected.sum() != 0
495
+ if vp_embeds is None:
496
+ input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
497
+ else:
498
+ if len(input_embeds[selected]) != len(vp_embeds.reshape(-1, C)):
499
+ print("Shape mismatch, selected is {}, vp embeds is {} !!!" \
500
+ .format(len(input_embeds[selected]), len(vp_embeds.reshape(-1, C))))
501
+ min_tokens = min(len(input_embeds[selected]), len(vp_embeds.reshape(-1, C)))
502
+ input_embeds[torch.nonzero(selected, as_tuple=True)[0][:min_tokens]] = vp_embeds.reshape(-1, C)[:min_tokens].to(input_embeds.device)
503
+ else:
504
+ input_embeds[selected] = vp_embeds.reshape(-1, C).to(input_embeds.device)
505
+
506
+ input_embeds = input_embeds.reshape(B, N, C)
507
+ else:
508
+ input_embeds = self.language_model.get_input_embeddings()(input_ids)
509
+
510
+ outputs = self.language_model.generate(
511
+ inputs_embeds=input_embeds,
512
+ attention_mask=attention_mask.to(device),
513
+ generation_config=generation_config,
514
+ output_hidden_states=output_hidden_states,
515
+ return_dict=return_dict,
516
+ use_cache=True,
517
+ **generate_kwargs,
518
+ )
519
+
520
+ return outputs
521
+
522
+ def preparing_for_generation(self, tokenizer, max_new_tokens=2048, torch_dtype=torch.bfloat16):
523
+ # set stop criteria and generation configs for model
524
+ if not hasattr(self, 'tokenizer'):
525
+ self.tokenizer = tokenizer
526
+ self.bot_name = 'BOT'
527
+ stop_words = []
528
+ stop_words += self.template.get('STOP_WORDS', [])
529
+ stop_criteria = get_stop_criteria(
530
+ tokenizer=self.tokenizer, stop_words=stop_words)
531
+ self.stop_criteria = stop_criteria
532
+
533
+ default_generation_kwargs = dict(
534
+ max_new_tokens=max_new_tokens,
535
+ do_sample=False,
536
+ eos_token_id=self.tokenizer.eos_token_id,
537
+ pad_token_id=(
538
+ self.tokenizer.pad_token_id
539
+ if self.tokenizer.pad_token_id is not None
540
+ else self.tokenizer.eos_token_id
541
+ ),
542
+ )
543
+
544
+ self.gen_config = GenerationConfig(**default_generation_kwargs)
545
+ self.init_prediction_config = True
546
+ self.torch_dtype = torch_dtype
547
+ self.to(torch_dtype)
548
+ self.extra_image_processor = DirectResize(target_length=1024, )
549
+ # for multi image process
550
+ self.min_dynamic_patch = 1
551
+ self.max_dynamic_patch = 12
552
+ self.downsample_ratio = 0.5
553
+ self.image_size = 448
554
+ self.use_thumbnail = True
555
+ patch_size = 14
556
+ self.patch_size = patch_size
557
+
558
+ self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2))
559
+ self.IMAGENET_MEAN = (0.485, 0.456, 0.406)
560
+ self.IMAGENET_STD = (0.229, 0.224, 0.225)
561
+ self.IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>'
562
+ self.IMG_START_TOKEN = '<img>'
563
+ self.IMG_END_TOKEN = '</img>'
564
+
565
+ self.transformer = T.Compose([
566
+ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
567
+ T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC),
568
+ T.ToTensor(),
569
+ T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD)
570
+ ])
571
+ self.VP_START_TOKEN = '<vp>'
572
+ self.VP_END_TOKEN = '</vp>'
573
+
574
+ # override the Phi-3 prepare_inputs_for_generation function so generation works when only inputs_embeds are passed
575
+ if self.config.llm_config.architectures[0] == 'Phi3ForCausalLM':
576
+ self.language_model.prepare_inputs_for_generation = MethodType(prepare_inputs_for_generation_phi3, self.language_model)
577
+
578
+ img_context_token_id = tokenizer.convert_tokens_to_ids('<IMG_CONTEXT>')
579
+ self.img_context_token_id = img_context_token_id
580
+ self.seg_token_idx = tokenizer.convert_tokens_to_ids('[SEG]')
581
+ return
582
+
583
+ def predict_forward(
584
+ self,
585
+ image=None,
586
+ text=None,
587
+ past_text='',
588
+ mask_prompts=None,
589
+ tokenizer=None,
590
+ ):
591
+ if not self.init_prediction_config:
592
+ assert tokenizer
593
+ self.preparing_for_generation(tokenizer=tokenizer)
594
+
595
+ input_dict = {}
596
+
597
+ ori_image_size = image.size
598
+
599
+ # prepare grounding images
600
+ g_image = np.array(image) # for grounding
601
+ g_image = self.extra_image_processor.apply_image(g_image)
602
+ g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous().to(self.torch_dtype)
603
+ input_dict['g_pixel_values'] = g_pixel_values
604
+
605
+ images = dynamic_preprocess(image, self.min_dynamic_patch,
606
+ self.max_dynamic_patch,
607
+ self.image_size, self.use_thumbnail)
608
+
609
+ if mask_prompts is not None:
610
+ vp_overall_mask = torch.Tensor([False] * (len(images) - 1) + [True])
611
+ input_dict['vp_overall_mask'] = vp_overall_mask
612
+ else:
613
+ input_dict['vp_overall_mask'] = None
614
+
615
+ pixel_values = [self.transformer(image) for image in images]
616
+ pixel_values = torch.stack(pixel_values).to(self.torch_dtype)
617
+ input_dict['pixel_values'] = pixel_values
618
+ num_image_tokens = pixel_values.shape[0] * self.patch_token
619
+
620
+ if mask_prompts is not None:
621
+ # reshape mask prompts to feature size
622
+ mask_prompts = [torch.Tensor(item).to(pixel_values.device) for item in mask_prompts]
623
+ mask_prompts = [F.interpolate(
624
+ item.unsqueeze(0),
625
+ size=(int(self.image_size // self.patch_size * self.downsample_ratio),
626
+ int(self.image_size // self.patch_size * self.downsample_ratio)),
627
+ mode='nearest').squeeze(0) for item in mask_prompts]
628
+ region_pixels = []
629
+ for mask_prompt in mask_prompts[0]:
630
+ region_pixels.append(mask_prompt.to(torch.int64).sum())
631
+
632
+ vp_token_str = '\nThere are {} part regions in the picture: '.format(len(mask_prompts[0]))
633
+ for i in range(len(mask_prompts[0])):
634
+ vp_token_str = vp_token_str + \
635
+ f"region{i + 1}" + self.VP_START_TOKEN + \
636
+ self.IMG_CONTEXT_TOKEN * region_pixels[i] + \
637
+ self.VP_END_TOKEN
638
+ if i == len(mask_prompts[0]) - 1:
639
+ vp_token_str = vp_token_str + '.\n'
640
+ else:
641
+ vp_token_str = vp_token_str + ', '
642
+ else:
643
+ vp_token_str = ''
644
+
645
+ image_token_str = f'{self.IMG_START_TOKEN}' \
646
+ f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \
647
+ f'{self.IMG_END_TOKEN}'
648
+
649
+ ret_masks = []
650
+
651
+ if '<image>' in text or mask_prompts is not None:
652
+ assert past_text is None or len(past_text) == 0
653
+ text = text.replace('<image>', image_token_str + vp_token_str)
654
+ input_text = ''
655
+ input_text += self.template['INSTRUCTION'].format(
656
+ input=text, round=1, bot_name=self.bot_name)
657
+ input_text = past_text + input_text
658
+ ids = self.tokenizer.encode(input_text)
659
+ ids = torch.tensor(ids).cuda().unsqueeze(0)
660
+
661
+ attention_mask = torch.ones_like(ids, dtype=torch.bool)
662
+
663
+ mm_inputs = {
664
+ 'pixel_values': input_dict['pixel_values'],
665
+ 'input_ids': ids,
666
+ 'attention_mask': attention_mask,
667
+ 'position_ids': None,
668
+ 'past_key_values': None,
669
+ 'labels': None,
670
+ 'prompt_masks': mask_prompts,
671
+ 'vp_overall_mask': input_dict['vp_overall_mask'],
672
+ }
673
+
674
+ generate_output = self.generate(
675
+ **mm_inputs,
676
+ generation_config=self.gen_config,
677
+ streamer=None,
678
+ bos_token_id=self.tokenizer.bos_token_id,
679
+ stopping_criteria=self.stop_criteria,
680
+ output_hidden_states=True,
681
+ return_dict_in_generate=True
682
+ )
683
+ predict = self.tokenizer.decode(
684
+ generate_output.sequences[0], skip_special_tokens=False).strip()
685
+
686
+ # if have seg result, find the seg hidden states
687
+ hidden_states = generate_output.hidden_states
688
+ last_hidden_states = [item[-1][0] for item in hidden_states]
689
+ last_hidden_states = torch.cat(last_hidden_states, dim=0)
690
+ seg_hidden_states = get_seg_hidden_states(
691
+ last_hidden_states, generate_output.sequences[0][:-1],
692
+ seg_id=self.seg_token_idx
693
+ )
694
+ all_seg_hidden_states = self.text_hidden_fcs(seg_hidden_states)
695
+
696
+ for seg_hidden_states in all_seg_hidden_states:
697
+ seg_hidden_states = seg_hidden_states.unsqueeze(0)
698
+ g_pixel_values = torch.stack([
699
+ self.grounding_encoder.preprocess_image(pixel, dtype=self.torch_dtype)
700
+ for pixel in [input_dict['g_pixel_values']]])
701
+ sam_states = self.grounding_encoder.get_sam2_embeddings(g_pixel_values)
702
+ pred_masks = self.grounding_encoder.inject_language_embd(sam_states, [seg_hidden_states])
703
+ w, h = ori_image_size
704
+ masks = F.interpolate(pred_masks, size=(h, w), mode='bilinear', align_corners=False)
705
+ masks = masks[:, 0]
706
+ masks = masks.sigmoid() > 0.5
707
+ masks = masks.int().cpu()
708
+ ret_masks.append(masks)
709
+
710
+ return {'prediction': predict, 'prediction_masks': ret_masks,}
711
+
712
+ def get_seg_hidden_states(hidden_states, output_ids, seg_id):
713
+ seg_mask = output_ids == seg_id
714
+ n_out = len(seg_mask)
715
+ return hidden_states[-n_out:][seg_mask]
716
+
717
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height,
718
+ image_size):
719
+ best_ratio_diff = float('inf')
720
+ best_ratio = (1, 1)
721
+ area = width * height
722
+ for ratio in target_ratios:
723
+ target_aspect_ratio = ratio[0] / ratio[1]
724
+ ratio_diff = abs(aspect_ratio - target_aspect_ratio)
725
+ if ratio_diff < best_ratio_diff:
726
+ best_ratio_diff = ratio_diff
727
+ best_ratio = ratio
728
+ elif ratio_diff == best_ratio_diff:
729
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
730
+ best_ratio = ratio
731
+ return best_ratio
732
+
733
+ def dynamic_preprocess(image,
734
+ min_num=1,
735
+ max_num=6,
736
+ image_size=448,
737
+ use_thumbnail=False):
738
+ orig_width, orig_height = image.size
739
+ aspect_ratio = orig_width / orig_height
740
+
741
+ # calculate the existing image aspect ratio
742
+ target_ratios = {(i, j)
743
+ for n in range(min_num, max_num + 1)
744
+ for i in range(1, n + 1) for j in range(1, n + 1)
745
+ if i * j <= max_num and i * j >= min_num}
746
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
747
+
748
+ # find the closest aspect ratio to the target
749
+ target_aspect_ratio = find_closest_aspect_ratio(aspect_ratio,
750
+ target_ratios, orig_width,
751
+ orig_height, image_size)
752
+
753
+ # calculate the target width and height
754
+ target_width = image_size * target_aspect_ratio[0]
755
+ target_height = image_size * target_aspect_ratio[1]
756
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
757
+
758
+ # resize the image
759
+ resized_img = image.resize((target_width, target_height))
760
+ processed_images = []
761
+ for i in range(blocks):
762
+ box = ((i % (target_width // image_size)) * image_size,
763
+ (i // (target_width // image_size)) * image_size,
764
+ ((i % (target_width // image_size)) + 1) * image_size,
765
+ ((i // (target_width // image_size)) + 1) * image_size)
766
+ # split the image
767
+ split_img = resized_img.crop(box)
768
+ processed_images.append(split_img)
769
+ assert len(processed_images) == blocks
770
+ if use_thumbnail and len(processed_images) != 1:
771
+ thumbnail_img = image.resize((image_size, image_size))
772
+ processed_images.append(thumbnail_img)
773
+ return processed_images
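A worked example of the tiling above; the image size is invented for illustration:

from PIL import Image

img = Image.new('RGB', (1600, 800))       # toy image with aspect ratio 2.0
tiles = dynamic_preprocess(img, min_num=1, max_num=6, image_size=448, use_thumbnail=True)
# The closest allowed grid is 2x1, so the image is resized to 896x448 and cut into
# two 448x448 crops; use_thumbnail adds a third 448x448 thumbnail -> len(tiles) == 3.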
774
+
775
+
776
+ from transformers.cache_utils import Cache, DynamicCache
777
+
778
+ def prepare_inputs_for_generation_phi3(
779
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
780
+ ):
781
+ if past_key_values is not None:
782
+ if isinstance(past_key_values, Cache):
783
+ cache_length = past_key_values.get_seq_length()
784
+ past_length = past_key_values.seen_tokens
785
+ max_cache_length = past_key_values.get_max_length()
786
+ else:
787
+ cache_length = past_length = past_key_values[0][0].shape[2]
788
+ max_cache_length = None
789
+
790
+ # Keep only the unprocessed tokens:
791
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
792
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
793
+ # input)
794
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
795
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):]
796
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
797
+ # input_ids based on the past_length.
798
+ elif past_length < input_ids.shape[1]:
799
+ input_ids = input_ids[:, past_length:]
800
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
801
+
802
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
803
+ if (
804
+ max_cache_length is not None
805
+ and attention_mask is not None
806
+ and cache_length + input_ids.shape[1] > max_cache_length
807
+ ):
808
+ attention_mask = attention_mask[:, -max_cache_length:]
809
+
810
+ position_ids = kwargs.get('position_ids', None)
811
+ if attention_mask is not None and position_ids is None:
812
+ # create position_ids on the fly for batch generation
813
+ position_ids = attention_mask.long().cumsum(-1) - 1
814
+ position_ids.masked_fill_(attention_mask == 0, 1)
815
+ if past_key_values:
816
+ position_ids = position_ids[:, -input_ids.shape[1]:]
817
+
818
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
819
+ if inputs_embeds is not None and (past_key_values is None or len(past_key_values)==0):
820
+ model_inputs = {'inputs_embeds': inputs_embeds}
821
+ else:
822
+ model_inputs = {'input_ids': input_ids}
823
+
824
+ model_inputs.update(
825
+ {
826
+ 'position_ids': position_ids,
827
+ 'past_key_values': past_key_values,
828
+ 'use_cache': kwargs.get('use_cache'),
829
+ 'attention_mask': attention_mask,
830
+ }
831
+ )
832
+ return model_inputs
833
+
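Note on the tiling code above: a minimal usage sketch, assuming the module defining dynamic_preprocess (and its helper find_closest_aspect_ratio) is importable and PIL is installed; the image size and tile budget below are illustrative choices, not the repo's defaults.

from PIL import Image

# Illustrative 2:1 landscape input; 448-pixel tiles, at most 6 tiles, plus a global thumbnail.
img = Image.new('RGB', (1600, 800))
tiles = dynamic_preprocess(img, min_num=1, max_num=6, image_size=448, use_thumbnail=True)

# With max_num=6 the only grid matching the 2:1 aspect ratio is (2, 1), so the image is
# resized to 896x448, cut into two 448x448 crops, and a 448x448 thumbnail of the whole
# image is appended, giving three PIL images in total.
print(len(tiles), [t.size for t in tiles])  # 3 [(448, 448), (448, 448), (448, 448)]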
sam2.py ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,41 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<img>",
4
+ "</img>",
5
+ "<IMG_CONTEXT>",
6
+ "<quad>",
7
+ "</quad>",
8
+ "<ref>",
9
+ "</ref>",
10
+ "<box>",
11
+ "</box>"
12
+ ],
13
+ "bos_token": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ },
20
+ "eos_token": {
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": true,
25
+ "single_word": false
26
+ },
27
+ "pad_token": {
28
+ "content": "</s>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": true,
32
+ "single_word": false
33
+ },
34
+ "unk_token": {
35
+ "content": "<unk>",
36
+ "lstrip": false,
37
+ "normalized": false,
38
+ "rstrip": false,
39
+ "single_word": false
40
+ }
41
+ }
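A small consistency check one might run from inside the model folder (illustrative, not part of the upload): every token listed in additional_special_tokens above should also be registered with a fixed id in added_tokens_decoder inside tokenizer_config.json further down.

import json

with open('special_tokens_map.json') as f:
    extra = json.load(f)['additional_special_tokens']
with open('tokenizer_config.json') as f:
    decoder = json.load(f)['added_tokens_decoder']

# added_tokens_decoder maps id -> token entry; collect the registered token strings.
registered = {entry['content'] for entry in decoder.values()}
missing = [tok for tok in extra if tok not in registered]
assert not missing, f'unregistered special tokens: {missing}'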
templates.py ADDED
@@ -0,0 +1,170 @@
1
+
2
+ PROMPT_TEMPLATE = dict(
3
+ default=dict(
4
+ SYSTEM='<|System|>:{system}\n',
5
+ INSTRUCTION='<|User|>:{input}\n<|Bot|>:',
6
+ SEP='\n'),
7
+ zephyr=dict(
8
+ SYSTEM='<|system|>\n{system}\n',
9
+ INSTRUCTION='<|user|>\n{input}\n<|assistant|>\n',
10
+ SEP='\n'),
11
+ internlm_chat=dict(
12
+ SYSTEM='<|System|>:{system}\n',
13
+ INSTRUCTION='<|User|>:{input}<eoh>\n<|Bot|>:',
14
+ SUFFIX='<eoa>',
15
+ SUFFIX_AS_EOS=True,
16
+ SEP='\n',
17
+ STOP_WORDS=['<eoa>']),
18
+ internlm2_chat=dict(
19
+ SYSTEM='<|im_start|>system\n{system}<|im_end|>\n',
20
+ INSTRUCTION=('<|im_start|>user\n{input}<|im_end|>\n'
21
+ '<|im_start|>assistant\n'),
22
+ SUFFIX='<|im_end|>',
23
+ SUFFIX_AS_EOS=True,
24
+ SEP='\n',
25
+ STOP_WORDS=['<|im_end|>']),
26
+ moss_sft=dict(
27
+ SYSTEM='{system}\n',
28
+ INSTRUCTION='<|Human|>: {input}<eoh>\n',
29
+ SEP='\n',
30
+ STOP_WORDS=['<eoc>', '<eom>']),
31
+ llama2_chat=dict(
32
+ SYSTEM=(
33
+ '[INST] <<SYS>>\n You are a helpful, respectful and honest '
34
+ 'assistant. Always answer as helpfully as possible, while being '
35
+ 'safe. Your answers should not include any harmful, unethical, '
36
+ 'racist, sexist, toxic, dangerous, or illegal content. Please '
37
+ 'ensure that your responses are socially unbiased and positive in '
38
+ 'nature.\n{system}\n<</SYS>>\n [/INST] '),
39
+ INSTRUCTION='[INST] {input} [/INST]',
40
+ SEP='\n'),
41
+ code_llama_chat=dict(
42
+ SYSTEM='{system}\n', INSTRUCTION='[INST] {input} [/INST]'),
43
+ chatglm2=dict(
44
+ SYSTEM='{system}\n',
45
+ INSTRUCTION='[Round {round}]\n\n问:{input}\n\n答:',
46
+ SEP='\n\n'),
47
+ chatglm3=dict(
48
+ SYSTEM='<|system|>\n{system}',
49
+ INSTRUCTION='<|user|>\n{input}<|assistant|>\n',
50
+ SEP='\n'),
51
+ qwen_chat=dict(
52
+ SYSTEM=('<|im_start|>system\n{system}<|im_end|>\n'),
53
+ INSTRUCTION=('<|im_start|>user\n{input}<|im_end|>\n'
54
+ '<|im_start|>assistant\n'),
55
+ SUFFIX='<|im_end|>',
56
+ SUFFIX_AS_EOS=True,
57
+ SEP='\n',
58
+ STOP_WORDS=['<|im_end|>', '<|endoftext|>']),
59
+ baichuan_chat=dict(
60
+ SYSTEM='{system}\n',
61
+ INSTRUCTION='<reserved_102>{input}<reserved_103>',
62
+ SEP='\n'),
63
+ baichuan2_chat=dict(
64
+ SYSTEM='{system}\n',
65
+ INSTRUCTION='<reserved_106>{input}<reserved_107>',
66
+ SEP='\n'),
67
+ wizardlm=dict(
68
+ SYSTEM=('A chat between a curious user and an artificial '
69
+ 'intelligence assistant. The assistant gives '
70
+ 'helpful, detailed, and polite answers to the '
71
+ 'user\'s questions. {system}\n '),
72
+ INSTRUCTION=('USER: {input} ASSISTANT:'),
73
+ SEP='\n'),
74
+ wizardcoder=dict(
75
+ SYSTEM=(
76
+ 'Below is an instruction that describes a task. '
77
+ 'Write a response that appropriately completes the request.\n\n'
78
+ '{system}\n '),
79
+ INSTRUCTION=('### Instruction:\n{input}\n\n### Response:'),
80
+ SEP='\n\n'),
81
+ vicuna=dict(
82
+ SYSTEM=('A chat between a curious user and an artificial '
83
+ 'intelligence assistant. The assistant gives '
84
+ 'helpful, detailed, and polite answers to the '
85
+ 'user\'s questions. {system}\n '),
86
+ INSTRUCTION=('USER: {input} ASSISTANT:'),
87
+ SEP='\n'),
88
+ deepseek_coder=dict(
89
+ SYSTEM=('You are an AI programming assistant, utilizing '
90
+ 'the DeepSeek Coder model, developed by DeepSeek '
91
+ 'Company, and you only answer questions related '
92
+ 'to computer science. For politically sensitive '
93
+ 'questions, security and privacy issues, and '
94
+ 'other non-computer science questions, you will '
95
+ 'refuse to answer. {system}\n'),
96
+ INSTRUCTION=('### Instruction:\n{input}\n### Response:\n'),
97
+ SEP='\n'),
98
+ # TODO: deprecation, v0.2.0
99
+ deepseekcoder=dict(
100
+ SYSTEM=('You are an AI programming assistant, utilizing '
101
+ 'the DeepSeek Coder model, developed by DeepSeek '
102
+ 'Company, and you only answer questions related '
103
+ 'to computer science. For politically sensitive '
104
+ 'questions, security and privacy issues, and '
105
+ 'other non-computer science questions, you will '
106
+ 'refuse to answer. {system}\n'),
107
+ INSTRUCTION=('### Instruction:\n{input}\n### Response:\n'),
108
+ SEP='\n'),
109
+ deepseek_moe=dict(
110
+ SYSTEM=('[INST] {system} [/INST]\n'),
111
+ INSTRUCTION=('[INST] {input} [/INST]'),
112
+ SEP='\n'),
113
+ deepseek_v2=dict(
114
+ SYSTEM='{system}\n\n',
115
+ INSTRUCTION='User: {input}\n\nAssistant: ',
116
+ SUFFIX='<|end▁of▁sentence|>',
117
+ SUFFIX_AS_EOS=True,
118
+ STOP_WORDS=['<|end▁of▁sentence|>']),
119
+ mistral=dict(
120
+ SYSTEM=('[INST] {system} [/INST]\n'),
121
+ INSTRUCTION=('[INST] {input} [/INST]'),
122
+ SEP='\n'),
123
+ mixtral=dict(
124
+ SYSTEM=('[INST] {system} [/INST]\n'),
125
+ INSTRUCTION=('[INST] {input} [/INST]'),
126
+ SEP='\n'),
127
+ minicpm=dict(INSTRUCTION=('<用户> {input} <AI>'), SEP='\n'),
128
+ minicpm3=dict(
129
+ SYSTEM=('<|im_start|>system\n{system}<|im_end|>\n'),
130
+ INSTRUCTION=('<|im_start|>user\n{input}<|im_end|>\n'
131
+ '<|im_start|>assistant\n'),
132
+ SUFFIX='<|im_end|>',
133
+ SUFFIX_AS_EOS=True,
134
+ SEP='\n',
135
+ STOP_WORDS=['<|im_end|>', '<|endoftext|>']),
136
+ gemma=dict(
137
+ # `system` field is extended by xtuner
138
+ SYSTEM=('<start_of_turn>system\n{system}<end_of_turn>\n'),
139
+ INSTRUCTION=('<start_of_turn>user\n{input}<end_of_turn>\n'
140
+ '<start_of_turn>model\n'),
141
+ SUFFIX='<end_of_turn>',
142
+ SUFFIX_AS_EOS=False,
143
+ SEP='\n',
144
+ STOP_WORDS=['<end_of_turn>']),
145
+ cohere_chat=dict(
146
+ SYSTEM=('<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system}'
147
+ '<|END_OF_TURN_TOKEN|>'),
148
+ INSTRUCTION=(
149
+ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{input}<|END_OF_TURN_TOKEN|>'
150
+ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>'),
151
+ SUFFIX='<|END_OF_TURN_TOKEN|>',
152
+ SUFFIX_AS_EOS=True,
153
+ STOP_WORDS=['<|END_OF_TURN_TOKEN|>']),
154
+ llama3_chat=dict(
155
+ SYSTEM=('<|start_header_id|>system<|end_header_id|>\n\n'
156
+ '{system}<|eot_id|>'),
157
+ INSTRUCTION=(
158
+ '<|start_header_id|>user<|end_header_id|>\n\n{input}<|eot_id|>'
159
+ '<|start_header_id|>assistant<|end_header_id|>\n\n'),
160
+ SUFFIX='<|eot_id|>',
161
+ SUFFIX_AS_EOS=True,
162
+ STOP_WORDS=['<|eot_id|>']),
163
+ phi3_chat=dict(
164
+ SYSTEM='<|system|>\n{system}<|end|>\n',
165
+ INSTRUCTION='<|user|>\n{input}<|end|>\n<|assistant|>\n',
166
+ SUFFIX='<|end|>',
167
+ SUFFIX_AS_EOS=True,
168
+ SEP='\n',
169
+ STOP_WORDS=['<|end|>']),
170
+ )
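The dictionary above only stores format strings. A hypothetical helper (not part of templates.py, shown only for orientation and assuming the file is importable) expands the phi3_chat entry, which matches this model's Phi-3 backbone, into a prompt:

from templates import PROMPT_TEMPLATE  # import path is an assumption

template = PROMPT_TEMPLATE['phi3_chat']

def build_prompt(system, user_input):
    # Optional system turn followed by a single user turn.
    prompt = template['SYSTEM'].format(system=system) if system else ''
    prompt += template['INSTRUCTION'].format(input=user_input)
    return prompt

print(build_prompt('You are a helpful assistant.', 'Describe the image.'))
# <|system|>
# You are a helpful assistant.<|end|>
# <|user|>
# Describe the image.<|end|>
# <|assistant|>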
tokenization_internlm2.py ADDED
@@ -0,0 +1,235 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Tokenization classes for InternLM."""
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+ from transformers.tokenization_utils import PreTrainedTokenizer
24
+ from transformers.utils import logging
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
29
+
30
+ PRETRAINED_VOCAB_FILES_MAP = {}
31
+
32
+
33
+ # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
34
+ class InternLM2Tokenizer(PreTrainedTokenizer):
35
+ """
36
+ Construct an InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
37
+
38
+ Args:
39
+ vocab_file (`str`):
40
+ Path to the vocabulary file.
41
+ """
42
+
43
+ vocab_files_names = VOCAB_FILES_NAMES
44
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
45
+ model_input_names = ['input_ids', 'attention_mask']
46
+ _auto_class = 'AutoTokenizer'
47
+
48
+ def __init__(
49
+ self,
50
+ vocab_file,
51
+ unk_token='<unk>',
52
+ bos_token='<s>',
53
+ eos_token='</s>',
54
+ pad_token='</s>',
55
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
56
+ add_bos_token=True,
57
+ add_eos_token=False,
58
+ decode_with_prefix_space=False,
59
+ clean_up_tokenization_spaces=False,
60
+ **kwargs,
61
+ ):
62
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
63
+ self.vocab_file = vocab_file
64
+ self.add_bos_token = add_bos_token
65
+ self.add_eos_token = add_eos_token
66
+ self.decode_with_prefix_space = decode_with_prefix_space
67
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
68
+ self.sp_model.Load(vocab_file)
69
+ self._no_prefix_space_tokens = None
70
+ super().__init__(
71
+ bos_token=bos_token,
72
+ eos_token=eos_token,
73
+ unk_token=unk_token,
74
+ pad_token=pad_token,
75
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
76
+ **kwargs,
77
+ )
78
+
79
+ @property
80
+ def no_prefix_space_tokens(self):
81
+ if self._no_prefix_space_tokens is None:
82
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
83
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
84
+ return self._no_prefix_space_tokens
85
+
86
+ @property
87
+ def vocab_size(self):
88
+ """Returns vocab size"""
89
+ return self.sp_model.get_piece_size()
90
+
91
+ @property
92
+ def bos_token_id(self) -> Optional[int]:
93
+ return self.sp_model.bos_id()
94
+
95
+ @property
96
+ def eos_token_id(self) -> Optional[int]:
97
+ return self.sp_model.eos_id()
98
+
99
+ def get_vocab(self):
100
+ """Returns vocab as a dict"""
101
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
102
+ vocab.update(self.added_tokens_encoder)
103
+ return vocab
104
+
105
+ def _tokenize(self, text):
106
+ """Returns a tokenized string."""
107
+ return self.sp_model.encode(text, out_type=str)
108
+
109
+ def _convert_token_to_id(self, token):
110
+ """Converts a token (str) in an id using the vocab."""
111
+ return self.sp_model.piece_to_id(token)
112
+
113
+ def _convert_id_to_token(self, index):
114
+ """Converts an index (integer) in a token (str) using the vocab."""
115
+ token = self.sp_model.IdToPiece(index)
116
+ return token
117
+
118
+ def _maybe_add_prefix_space(self, tokens, decoded):
119
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
120
+ return ' ' + decoded
121
+ else:
122
+ return decoded
123
+
124
+ def convert_tokens_to_string(self, tokens):
125
+ """Converts a sequence of tokens (string) in a single string."""
126
+ current_sub_tokens = []
127
+ out_string = ''
128
+ prev_is_special = False
129
+ for token in tokens:
130
+ # make sure that special tokens are not decoded using sentencepiece model
131
+ if token in self.all_special_tokens:
132
+ if not prev_is_special:
133
+ out_string += ' '
134
+ out_string += self.sp_model.decode(current_sub_tokens) + token
135
+ prev_is_special = True
136
+ current_sub_tokens = []
137
+ else:
138
+ current_sub_tokens.append(token)
139
+ prev_is_special = False
140
+ out_string += self.sp_model.decode(current_sub_tokens)
141
+ out_string = self.clean_up_tokenization(out_string)
142
+ out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
143
+ return out_string[1:]
144
+
145
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
146
+ """
147
+ Save the vocabulary and special tokens file to a directory.
148
+
149
+ Args:
150
+ save_directory (`str`):
151
+ The directory in which to save the vocabulary.
152
+
153
+ Returns:
154
+ `Tuple(str)`: Paths to the files saved.
155
+ """
156
+ if not os.path.isdir(save_directory):
157
+ logger.error(f'Vocabulary path ({save_directory}) should be a directory')
158
+ return
159
+ out_vocab_file = os.path.join(
160
+ save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
161
+ )
162
+
163
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
164
+ copyfile(self.vocab_file, out_vocab_file)
165
+ elif not os.path.isfile(self.vocab_file):
166
+ with open(out_vocab_file, 'wb') as fi:
167
+ content_spiece_model = self.sp_model.serialized_model_proto()
168
+ fi.write(content_spiece_model)
169
+
170
+ return (out_vocab_file,)
171
+
172
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
173
+ if self.add_bos_token:
174
+ bos_token_ids = [self.bos_token_id]
175
+ else:
176
+ bos_token_ids = []
177
+
178
+ output = bos_token_ids + token_ids_0
179
+
180
+ if token_ids_1 is not None:
181
+ output = output + token_ids_1
182
+
183
+ if self.add_eos_token:
184
+ output = output + [self.eos_token_id]
185
+
186
+ return output
187
+
188
+ def get_special_tokens_mask(
189
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
190
+ ) -> List[int]:
191
+ """
192
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
193
+ special tokens using the tokenizer `prepare_for_model` method.
194
+
195
+ Args:
196
+ token_ids_0 (`List[int]`):
197
+ List of IDs.
198
+ token_ids_1 (`List[int]`, *optional*):
199
+ Optional second list of IDs for sequence pairs.
200
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
201
+ Whether or not the token list is already formatted with special tokens for the model.
202
+
203
+ Returns:
204
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
205
+ """
206
+ if already_has_special_tokens:
207
+ return super().get_special_tokens_mask(
208
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
209
+ )
210
+
211
+ if token_ids_1 is None:
212
+ return [1] + ([0] * len(token_ids_0)) + [1]
213
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
214
+
215
+ def create_token_type_ids_from_sequences(
216
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
217
+ ) -> List[int]:
218
+ """
219
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM2 does not make
220
+ use of token type ids, therefore a list of zeros is returned.
221
+
222
+ Args:
223
+ token_ids_0 (`List[int]`):
224
+ List of IDs.
225
+ token_ids_1 (`List[int]`, *optional*):
226
+ Optional second list of IDs for sequence pairs.
227
+
228
+ Returns:
229
+ `List[int]`: List of zeros.
230
+ """
231
+ eos = [self.eos_token_id]
232
+
233
+ if token_ids_1 is None:
234
+ return len(token_ids_0 + eos) * [0]
235
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
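For reference, a minimal sketch of the special-token plumbing above, assuming a SentencePiece vocab file is available locally (this repo's tokenizer.model is used purely as a placeholder path) and the constructor defaults add_bos_token=True / add_eos_token=False are kept:

from tokenization_internlm2 import InternLM2Tokenizer  # import path is an assumption

tok = InternLM2Tokenizer('tokenizer.model')

# build_inputs_with_special_tokens only prepends BOS under the defaults.
ids = tok.build_inputs_with_special_tokens([10, 11, 12])
print(ids == [tok.bos_token_id, 10, 11, 12])  # True

# get_special_tokens_mask marks the BOS slot and a trailing slot as special.
print(tok.get_special_tokens_mask([10, 11, 12]))  # [1, 0, 0, 0, 1]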
tokenization_internlm2_fast.py ADDED
@@ -0,0 +1,211 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama_fast.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Tokenization Fast class for InternLM."""
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, Optional, Tuple
21
+
22
+ from tokenizers import Tokenizer, decoders, normalizers, processors
23
+ from tokenizers.models import BPE
24
+ from transformers.convert_slow_tokenizer import (SLOW_TO_FAST_CONVERTERS,
25
+ SentencePieceExtractor,
26
+ SpmConverter)
27
+ from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
28
+ from transformers.utils import logging
29
+
30
+ from .tokenization_internlm2 import InternLM2Tokenizer
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
35
+
36
+
37
+ # Modified from transformers.convert_slow_tokenizer.LlamaConverter
38
+ class InternLM2Converter(SpmConverter):
39
+ handle_byte_fallback = True
40
+
41
+ def vocab(self, proto):
42
+ vocab = [
43
+ ('<unk>', 0.0),
44
+ ('<s>', 0.0),
45
+ ('</s>', 0.0),
46
+ ]
47
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
48
+ return vocab
49
+
50
+ def unk_id(self, proto):
51
+ unk_id = 0
52
+ return unk_id
53
+
54
+ def decoder(self, replacement, add_prefix_space):
55
+ return decoders.Sequence(
56
+ [
57
+ decoders.Replace('▁', ' '),
58
+ decoders.ByteFallback(),
59
+ decoders.Fuse(),
60
+ decoders.Strip(content=' ', left=1),
61
+ ]
62
+ )
63
+
64
+ def tokenizer(self, proto):
65
+ model_type = proto.trainer_spec.model_type
66
+ vocab_scores = self.vocab(proto)
67
+ # special tokens
68
+ added_tokens = self.original_tokenizer.added_tokens_decoder
69
+ for i in range(len(vocab_scores)):
70
+ piece, score = vocab_scores[i]
71
+ if i in added_tokens:
72
+ vocab_scores[i] = (added_tokens[i].content, score)
73
+ if model_type == 1:
74
+ raise RuntimeError('InternLM2 is supposed to be a BPE model!')
75
+
76
+ elif model_type == 2:
77
+ _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
78
+ bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
79
+ tokenizer = Tokenizer(
80
+ BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=True)
81
+ )
82
+ tokenizer.add_special_tokens(
83
+ [ added_token for index, added_token in added_tokens.items()]
84
+ )
85
+ else:
86
+ raise Exception(
87
+ "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
88
+ )
89
+
90
+ return tokenizer
91
+
92
+ def normalizer(self, proto):
93
+ normalizers_list = []
94
+ if proto.normalizer_spec.add_dummy_prefix:
95
+ normalizers_list.append(normalizers.Prepend(prepend='▁'))
96
+ normalizers_list.append(normalizers.Replace(pattern=' ', content='▁'))
97
+ return normalizers.Sequence(normalizers_list)
98
+
99
+ def pre_tokenizer(self, replacement, add_prefix_space):
100
+ return None
101
+
102
+
103
+ SLOW_TO_FAST_CONVERTERS['InternLM2Tokenizer'] = InternLM2Converter
104
+
105
+
106
+ # Modified from transformers.model.llama.tokenization_llama_fast.LlamaTokenizerFast -> InternLM2TokenizerFast
107
+ class InternLM2TokenizerFast(PreTrainedTokenizerFast):
108
+ vocab_files_names = VOCAB_FILES_NAMES
109
+ slow_tokenizer_class = InternLM2Tokenizer
110
+ padding_side = 'left'
111
+ model_input_names = ['input_ids', 'attention_mask']
112
+ _auto_class = 'AutoTokenizer'
113
+
114
+ def __init__(
115
+ self,
116
+ vocab_file,
117
+ unk_token='<unk>',
118
+ bos_token='<s>',
119
+ eos_token='</s>',
120
+ pad_token='</s>',
121
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
122
+ add_bos_token=True,
123
+ add_eos_token=False,
124
+ decode_with_prefix_space=False,
125
+ clean_up_tokenization_spaces=False,
126
+ **kwargs,
127
+ ):
128
+ super().__init__(
129
+ vocab_file=vocab_file,
130
+ unk_token=unk_token,
131
+ bos_token=bos_token,
132
+ eos_token=eos_token,
133
+ pad_token=pad_token,
134
+ sp_model_kwargs=sp_model_kwargs,
135
+ add_bos_token=add_bos_token,
136
+ add_eos_token=add_eos_token,
137
+ decode_with_prefix_space=decode_with_prefix_space,
138
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
139
+ **kwargs,
140
+ )
141
+ self._add_bos_token = add_bos_token
142
+ self._add_eos_token = add_eos_token
143
+ self.update_post_processor()
144
+ self.vocab_file = vocab_file
145
+
146
+ @property
147
+ def can_save_slow_tokenizer(self) -> bool:
148
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
149
+
150
+ def update_post_processor(self):
151
+ """
152
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
153
+ """
154
+ bos = self.bos_token
155
+ bos_token_id = self.bos_token_id
156
+ if bos is None and self.add_bos_token:
157
+ raise ValueError('add_bos_token = True but bos_token = None')
158
+
159
+ eos = self.eos_token
160
+ eos_token_id = self.eos_token_id
161
+ if eos is None and self.add_eos_token:
162
+ raise ValueError('add_eos_token = True but eos_token = None')
163
+
164
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
165
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
166
+
167
+ special_tokens = []
168
+ if self.add_bos_token:
169
+ special_tokens.append((bos, bos_token_id))
170
+ if self.add_eos_token:
171
+ special_tokens.append((eos, eos_token_id))
172
+ self._tokenizer.post_processor = processors.TemplateProcessing(
173
+ single=single, pair=pair, special_tokens=special_tokens
174
+ )
175
+
176
+ @property
177
+ def add_eos_token(self):
178
+ return self._add_eos_token
179
+
180
+ @property
181
+ def add_bos_token(self):
182
+ return self._add_bos_token
183
+
184
+ @add_eos_token.setter
185
+ def add_eos_token(self, value):
186
+ self._add_eos_token = value
187
+ self.update_post_processor()
188
+
189
+ @add_bos_token.setter
190
+ def add_bos_token(self, value):
191
+ self._add_bos_token = value
192
+ self.update_post_processor()
193
+
194
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
195
+ if not self.can_save_slow_tokenizer:
196
+ raise ValueError(
197
+ 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
198
+ 'tokenizer.'
199
+ )
200
+
201
+ if not os.path.isdir(save_directory):
202
+ logger.error(f'Vocabulary path ({save_directory}) should be a directory')
203
+ return
204
+ out_vocab_file = os.path.join(
205
+ save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
206
+ )
207
+
208
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
209
+ copyfile(self.vocab_file, out_vocab_file)
210
+
211
+ return (out_vocab_file,)
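A worked example of the template strings that update_post_processor builds above, evaluated standalone with the constructor defaults add_bos_token=True / add_eos_token=False:

bos, eos = '<s>', '</s>'
add_bos_token, add_eos_token = True, False

single = f"{(bos + ':0 ') if add_bos_token else ''}$A:0{(' ' + eos + ':0') if add_eos_token else ''}"
pair = f"{single}{(' ' + bos + ':1') if add_bos_token else ''} $B:1{(' ' + eos + ':1') if add_eos_token else ''}"

print(single)  # <s>:0 $A:0
print(pair)    # <s>:0 $A:0 <s>:1 $B:1

So every encoded sequence gets <s> prepended and no EOS appended, and sequence pairs get a second <s> before the second segment.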
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,255 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": true,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": true,
27
+ "single_word": false,
28
+ "special": true
29
+ },
30
+ "32000": {
31
+ "content": "<|endoftext|>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false,
36
+ "special": true
37
+ },
38
+ "32001": {
39
+ "content": "<|assistant|>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": true,
43
+ "single_word": false,
44
+ "special": true
45
+ },
46
+ "32002": {
47
+ "content": "<|placeholder1|>",
48
+ "lstrip": false,
49
+ "normalized": false,
50
+ "rstrip": true,
51
+ "single_word": false,
52
+ "special": true
53
+ },
54
+ "32003": {
55
+ "content": "<|placeholder2|>",
56
+ "lstrip": false,
57
+ "normalized": false,
58
+ "rstrip": true,
59
+ "single_word": false,
60
+ "special": true
61
+ },
62
+ "32004": {
63
+ "content": "<|placeholder3|>",
64
+ "lstrip": false,
65
+ "normalized": false,
66
+ "rstrip": true,
67
+ "single_word": false,
68
+ "special": true
69
+ },
70
+ "32005": {
71
+ "content": "<|placeholder4|>",
72
+ "lstrip": false,
73
+ "normalized": false,
74
+ "rstrip": true,
75
+ "single_word": false,
76
+ "special": true
77
+ },
78
+ "32006": {
79
+ "content": "<|system|>",
80
+ "lstrip": false,
81
+ "normalized": false,
82
+ "rstrip": true,
83
+ "single_word": false,
84
+ "special": true
85
+ },
86
+ "32007": {
87
+ "content": "<|end|>",
88
+ "lstrip": false,
89
+ "normalized": false,
90
+ "rstrip": true,
91
+ "single_word": false,
92
+ "special": true
93
+ },
94
+ "32008": {
95
+ "content": "<|placeholder5|>",
96
+ "lstrip": false,
97
+ "normalized": false,
98
+ "rstrip": true,
99
+ "single_word": false,
100
+ "special": true
101
+ },
102
+ "32009": {
103
+ "content": "<|placeholder6|>",
104
+ "lstrip": false,
105
+ "normalized": false,
106
+ "rstrip": true,
107
+ "single_word": false,
108
+ "special": true
109
+ },
110
+ "32010": {
111
+ "content": "<|user|>",
112
+ "lstrip": false,
113
+ "normalized": false,
114
+ "rstrip": true,
115
+ "single_word": false,
116
+ "special": true
117
+ },
118
+ "32011": {
119
+ "content": "<img>",
120
+ "lstrip": false,
121
+ "normalized": false,
122
+ "rstrip": false,
123
+ "single_word": false,
124
+ "special": true
125
+ },
126
+ "32012": {
127
+ "content": "</img>",
128
+ "lstrip": false,
129
+ "normalized": false,
130
+ "rstrip": false,
131
+ "single_word": false,
132
+ "special": true
133
+ },
134
+ "32013": {
135
+ "content": "<IMG_CONTEXT>",
136
+ "lstrip": false,
137
+ "normalized": false,
138
+ "rstrip": false,
139
+ "single_word": false,
140
+ "special": true
141
+ },
142
+ "32014": {
143
+ "content": "<quad>",
144
+ "lstrip": false,
145
+ "normalized": false,
146
+ "rstrip": false,
147
+ "single_word": false,
148
+ "special": true
149
+ },
150
+ "32015": {
151
+ "content": "</quad>",
152
+ "lstrip": false,
153
+ "normalized": false,
154
+ "rstrip": false,
155
+ "single_word": false,
156
+ "special": true
157
+ },
158
+ "32016": {
159
+ "content": "<ref>",
160
+ "lstrip": false,
161
+ "normalized": false,
162
+ "rstrip": false,
163
+ "single_word": false,
164
+ "special": true
165
+ },
166
+ "32017": {
167
+ "content": "</ref>",
168
+ "lstrip": false,
169
+ "normalized": false,
170
+ "rstrip": false,
171
+ "single_word": false,
172
+ "special": true
173
+ },
174
+ "32018": {
175
+ "content": "<box>",
176
+ "lstrip": false,
177
+ "normalized": false,
178
+ "rstrip": false,
179
+ "single_word": false,
180
+ "special": true
181
+ },
182
+ "32019": {
183
+ "content": "</box>",
184
+ "lstrip": false,
185
+ "normalized": false,
186
+ "rstrip": false,
187
+ "single_word": false,
188
+ "special": true
189
+ },
190
+ "32020": {
191
+ "content": "[SEG]",
192
+ "lstrip": false,
193
+ "normalized": false,
194
+ "rstrip": false,
195
+ "single_word": false,
196
+ "special": true
197
+ },
198
+ "32021": {
199
+ "content": "<p>",
200
+ "lstrip": false,
201
+ "normalized": false,
202
+ "rstrip": false,
203
+ "single_word": false,
204
+ "special": true
205
+ },
206
+ "32022": {
207
+ "content": "</p>",
208
+ "lstrip": false,
209
+ "normalized": false,
210
+ "rstrip": false,
211
+ "single_word": false,
212
+ "special": true
213
+ },
214
+ "32023": {
215
+ "content": "<vp>",
216
+ "lstrip": false,
217
+ "normalized": false,
218
+ "rstrip": false,
219
+ "single_word": false,
220
+ "special": true
221
+ },
222
+ "32024": {
223
+ "content": "</vp>",
224
+ "lstrip": false,
225
+ "normalized": false,
226
+ "rstrip": false,
227
+ "single_word": false,
228
+ "special": true
229
+ }
230
+ },
231
+ "additional_special_tokens": [
232
+ "<img>",
233
+ "</img>",
234
+ "<IMG_CONTEXT>",
235
+ "<quad>",
236
+ "</quad>",
237
+ "<ref>",
238
+ "</ref>",
239
+ "<box>",
240
+ "</box>"
241
+ ],
242
+ "bos_token": "<s>",
243
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
244
+ "clean_up_tokenization_spaces": false,
245
+ "eos_token": "</s>",
246
+ "legacy": false,
247
+ "model_max_length": 8192,
248
+ "pad_token": "</s>",
249
+ "padding_side": "right",
250
+ "sp_model_kwargs": {},
251
+ "spaces_between_special_tokens": false,
252
+ "tokenizer_class": "LlamaTokenizer",
253
+ "unk_token": "<unk>",
254
+ "use_default_system_prompt": false
255
+ }
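Putting the pieces together, a hedged usage sketch (paths and messages are placeholders): loading the tokenizer from this folder and rendering one user turn through the chat_template defined above.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('.')  # run from the model folder; path is an assumption

messages = [{'role': 'user', 'content': 'Please segment the person on the left.'}]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)
# Expected, given the chat_template above:
# <s><|user|>
# Please segment the person on the left.<|end|>
# <|assistant|>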