Mihaiii committed
Commit b232de9 · verified · 1 Parent(s): 9ba1b1d

Upload modeling_ovis.py

Files changed (1)
  1. modeling_ovis.py +625 -0
modeling_ovis.py ADDED
@@ -0,0 +1,625 @@
# Copyright (C) 2024 AIDC-AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import importlib.metadata

from packaging import version
from importlib import import_module
from typing import List, Callable, Union, Optional, Dict

import PIL.Image
import torch
import transformers
from torch import Tensor
from torch.nn import init
from torch.nn.functional import softmax, gumbel_softmax, pad
from transformers.utils import is_flash_attn_2_available
from transformers import PreTrainedModel, AutoModel, AutoTokenizer, AutoModelForCausalLM, AutoImageProcessor
from transformers import SiglipImageProcessor, SiglipVisionModel
from transformers.cache_utils import HybridCache
from transformers.generation.utils import GenerateOutput

from .configuration_ovis import BaseVisualTokenizerConfig, SiglipVisualTokenizerConfig
from .configuration_ovis import OvisConfig, ConversationFormatter
from .configuration_ovis import IGNORE_ID, IMAGE_ATOM_ID, IMAGE_INDICATOR_IDS, IMAGE_TOKEN_ID


# ----------------------------------------------------------------------
# Visual Tokenizer
# ----------------------------------------------------------------------
class BaseVisualTokenizer(PreTrainedModel):
    base_model_prefix = "backbone"
    main_input_name = None
    _image_processor_class = None
    _image_processor_kwargs = {}
    _backbone_class = None
    _backbone_name_or_path = None

    def __init__(self, config: BaseVisualTokenizerConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.image_processor = AutoImageProcessor.from_pretrained(kwargs['image_processor_name_or_path'])
        self.backbone = AutoModel.from_config(self.config.backbone_config)
        head_dim = self.config.vocab_size - len(IMAGE_INDICATOR_IDS)  # reserved tokens for IMAGE_INDICATORS
        self.head = torch.nn.Sequential(
            torch.nn.Linear(
                self.backbone.config.hidden_size * self.config.hidden_stride * self.config.hidden_stride, head_dim,
                bias=False
            ),
            torch.nn.LayerNorm(head_dim)
        )

        assert all((self.image_processor.do_resize,
                    not getattr(self.image_processor, 'do_center_crop', False),
                    self.image_processor.do_rescale,
                    self.image_processor.do_normalize
                    )), f"image_processor `{self.image_processor}` is not supported currently"

    def get_backbone(self):
        return self.backbone

    def get_image_processor(self):
        return self.image_processor

    def mock_input(self):
        height, width = self.get_image_size()
        return torch.zeros(1, 3, height, width), self.construct_image_placeholders((1, 1))

    def get_head(self):
        return self.head

    def get_image_size(self):
        raise NotImplementedError

    @staticmethod
    def construct_image_placeholders(grid):
        image_placeholders = [IMAGE_INDICATOR_IDS[0], IMAGE_ATOM_ID, IMAGE_INDICATOR_IDS[1]]
        if grid[0] * grid[1] > 1:
            for r in range(grid[0]):
                for c in range(grid[1]):
                    image_placeholders.append(IMAGE_ATOM_ID)
                    if c < grid[1] - 1:
                        image_placeholders.append(IMAGE_INDICATOR_IDS[2])
                if r < grid[0] - 1:
                    image_placeholders.append(IMAGE_INDICATOR_IDS[3])
        image_placeholders.append(IMAGE_INDICATOR_IDS[4])
        return image_placeholders

    def preprocess_image(self, image: PIL.Image.Image, max_partition=9, covering_threshold=0.9, convert_to_rgb=True):
        def _preprocess(img: PIL.Image.Image, side):
            # first resize and preprocess
            w, h = img.size
            if w == h:
                new_width = new_height = side
            elif w > h:
                new_width = side
                new_height = int(h / w * new_width)
            else:
                new_height = side
                new_width = int(w / h * new_height)
            new_size = dict(height=new_height, width=new_width)
            pixel_values = self.image_processor.preprocess(img, size=new_size, return_tensors='pt')['pixel_values']

            # then pad to square
            square_values = torch.zeros([1, 3, side, side], dtype=pixel_values.dtype, device=pixel_values.device)
            new_height, new_width = pixel_values.shape[2:]
            if new_height == new_width:
                square_values[:, :, :, :] = pixel_values
            elif new_height > new_width:
                from_index = (side - new_width) // 2
                square_values[:, :, :, from_index:from_index + new_width] = pixel_values
            else:
                from_index = (side - new_height) // 2
                square_values[:, :, from_index:from_index + new_height, :] = pixel_values

            return square_values

        def _partition(img, grid):
            w, h = img.size
            row_height = h // grid[0]
            col_width = w // grid[1]

            partition = []
            for row in range(grid[0]):
                for col in range(grid[1]):
                    left = col * col_width
                    upper = row * row_height
                    right = w if col == grid[1] - 1 else (col + 1) * col_width
                    lower = h if row == grid[0] - 1 else (row + 1) * row_height
                    partition.append((left, upper, right, lower))

            return partition

        def _covering_area(left, upper, right, lower, side):
            w = right - left
            h = lower - upper
            w, h = max(w, h), min(w, h)
            if w > side:
                h = h / w * side
                w = side
            return w * h

        def _get_best_grid(img, side):
            img_area = img.size[0] * img.size[1]

            candidate_grids = []
            for i in range(1, max_partition + 1):
                for j in range(1, max_partition + 1):
                    if i * j <= max_partition:
                        candidate_grids.append((i, j))

            all_grids = []
            good_grids = []
            for grid in candidate_grids:
                partition = _partition(img, grid)
                covering_ratio = sum([_covering_area(*p, side) for p in partition]) / img_area
                assert covering_ratio <= 1.0
                all_grids.append((grid, covering_ratio))
                if covering_ratio > covering_threshold:
                    good_grids.append((grid, covering_ratio))

            if len(good_grids) > 0:
                # pick the good partition with minimum #sub_images and break the tie using covering_ratio
                return sorted(good_grids, key=lambda x: (x[0][0] * x[0][1], -x[1]))[0][0]
            else:
                # pick the partition with maximum covering_ratio and break the tie using #sub_images
                return sorted(all_grids, key=lambda x: (-x[1], x[0][0] * x[0][1]))[0][0]

        if convert_to_rgb and image.mode != 'RGB':
            image = image.convert('RGB')

        sides = self.get_image_size()
        if sides[0] != sides[1]:
            raise ValueError('get_image_size() returns non-square size')
        side = sides[0]
        grid = _get_best_grid(image, side)
        partition = _partition(image, grid)
        crops = [image.crop(p) for p in partition]
        if len(crops) > 1:
            crops.insert(0, image)
        pixel_values = torch.cat([_preprocess(crop, side) for crop in crops], dim=0)
        image_placeholders = self.construct_image_placeholders(grid)
        return pixel_values, image_placeholders

    def tokenize(self, logits):
        def st_argmax(y_soft, dim):  # straight-through argmax: hard one-hot forward, soft gradient
            index = y_soft.max(dim, keepdim=True)[1]
            y_hard = torch.zeros_like(y_soft, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
            ret = y_hard - y_soft.detach() + y_soft
            return ret

        if self.config.tokenize_function == 'softmax':
            tokens = softmax(logits, dim=-1)
        elif self.config.tokenize_function == 'gumbel_argmax':
            tokens = gumbel_softmax(logits, tau=self.config.tau, hard=True)
        elif self.config.tokenize_function == 'st_argmax':
            tokens = st_argmax(logits, dim=-1)
        else:
            raise ValueError(
                f'Invalid `tokenize_function`, expected softmax, gumbel_argmax or st_argmax, but got {self.config.tokenize_function}')
        return tokens

    def encode(self, pixel_values):
        output = self.backbone(pixel_values, output_hidden_states=True, return_dict=True)
        features = output.hidden_states[-1]
        if self.config.drop_cls_token:
            features = features[:, 1:, :]

        # merge every `hidden_stride * hidden_stride` group of hidden states to reduce the token sequence length,
        # e.g., for hidden_stride=3 this shortens the siglip token sequence from 729 to 81
        if self.config.hidden_stride > 1:
            n, l, d = features.shape  # `d` here is the backbone hidden size
            sqrt_l = int(l ** 0.5)
            assert sqrt_l ** 2 == l, "The token sequence length should be a perfect square."
            features = features.reshape(n, sqrt_l, sqrt_l, d)
            pl = (self.config.hidden_stride - (sqrt_l % self.config.hidden_stride)) % self.config.hidden_stride
            features = pad(features, (0, 0, 0, pl, 0, pl), "constant", 0)
            sqrt_l += pl
            features = features.reshape(n, sqrt_l // self.config.hidden_stride, self.config.hidden_stride,
                                        sqrt_l // self.config.hidden_stride, self.config.hidden_stride, d)
            features = features.permute(0, 1, 3, 2, 4, 5)  # [n, sqrt_l/hs, sqrt_l/hs, hs, hs, d]
            features = features.flatten(3)  # [n, sqrt_l/hs, sqrt_l/hs, hs*hs*d]
            features = features.reshape(
                n, -1, self.config.hidden_stride * self.config.hidden_stride * d)

        return features

    def forward(self, pixel_values) -> torch.Tensor:  # [BatchSize, ImageShape] -> [BatchSize, #Token, VocabSize]
        features = self.encode(pixel_values)
        logits = self.head(features)
        tokens = self.tokenize(logits)
        # tokens' shape is [BatchSize, #Token, VocabSize-5], so pad with zeros of shape [BatchSize, #Token, 5],
        # after which tokens' shape becomes [BatchSize, #Token, VocabSize]
        batch_size, token_len, _ = tokens.shape
        padding_tensor = torch.zeros(size=(batch_size, token_len, len(IMAGE_INDICATOR_IDS)),
                                     dtype=tokens.dtype,
                                     device=tokens.device,
                                     layout=tokens.layout,
                                     requires_grad=False)
        tokens = torch.cat((tokens, padding_tensor), dim=2)
        return tokens


class SiglipVisualTokenizer(BaseVisualTokenizer):
    config_class = SiglipVisualTokenizerConfig
    supports_gradient_checkpointing = True
    _no_split_modules = ["SiglipVisionTransformer"]
    _image_processor_class = SiglipImageProcessor
    _image_processor_kwargs = {}
    _backbone_class = SiglipVisionModel
    _backbone_name_or_path = "google/siglip-so400m-patch14-384"

    def get_image_size(self):
        height = self.image_processor.size["height"]
        width = self.image_processor.size["width"]
        return height, width


AutoModel.register(SiglipVisualTokenizerConfig, SiglipVisualTokenizer)


# ----------------------------------------------------------------------
# Ovis
# ----------------------------------------------------------------------
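# Note: VisualEmbedding below accepts either integer token ids (a standard embedding lookup) or
# a float tensor of per-token probabilities over the visual vocabulary, in which case each
# embedding is the probability-weighted mixture of the embedding table rows
# (torch.matmul(visual_tokens, self.weight)). This is what lets the soft tokens produced by
# BaseVisualTokenizer.forward flow into the LLM embedding space differentiably.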
class VisualEmbedding(torch.nn.Embedding):
    def forward(self, visual_tokens: Tensor) -> Tensor:
        if visual_tokens.dtype in [torch.int8, torch.int16, torch.int32, torch.int64, torch.long]:
            return super().forward(visual_tokens)
        return torch.matmul(visual_tokens, self.weight)

    def reset_parameters(self, mean=0., std=1.) -> None:
        init.normal_(self.weight, mean=mean, std=std)
        self._fill_padding_idx_with_zero()


class OvisPreTrainedModel(PreTrainedModel):
    config_class = OvisConfig
    base_model_prefix = "ovis"


class Ovis(OvisPreTrainedModel):

    def __init__(self, config: OvisConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        attn_kwargs = dict()
        if self.config.llm_attn_implementation:
            if self.config.llm_attn_implementation == "sdpa":
                raise ValueError("`sdpa` is currently not supported")
            elif self.config.llm_attn_implementation == "flash_attention_2":
                assert (is_flash_attn_2_available() and
                        version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.6.3")), \
                    "Using `flash_attention_2` requires having `flash_attn>=2.6.3` installed."
            attn_kwargs["attn_implementation"] = self.config.llm_attn_implementation
        self.llm = AutoModelForCausalLM.from_config(self.config.llm_config, **attn_kwargs)
        assert self.config.hidden_size == self.llm.config.hidden_size, "hidden size mismatch"
        self.text_tokenizer = AutoTokenizer.from_pretrained(self.config.name_or_path)
        self.visual_tokenizer = AutoModel.from_config(self.config.visual_tokenizer_config,
                                                      image_processor_name_or_path=self.config.name_or_path)
        self.vte = VisualEmbedding(
            self.config.visual_tokenizer_config.vocab_size,
            self.config.hidden_size,
            device=self.visual_tokenizer.device,
            dtype=self.visual_tokenizer.dtype
        )

        def _merge_modules(modules_list: tuple):
            merged_modules = []
            for modules in modules_list:
                merged_modules.extend(modules if modules else [])
            return merged_modules

        self._no_split_modules = _merge_modules((self.llm._no_split_modules, self.visual_tokenizer._no_split_modules))
        self._skip_keys_device_placement = self.llm._skip_keys_device_placement
        self._keep_in_fp32_modules = _merge_modules(
            (self.llm._keep_in_fp32_modules, self.visual_tokenizer._keep_in_fp32_modules))
        self.is_parallelizable = all((self.llm.is_parallelizable, self.visual_tokenizer.is_parallelizable))
        self.supports_gradient_checkpointing = all(
            (self.llm.supports_gradient_checkpointing, self.visual_tokenizer.supports_gradient_checkpointing))
        self._supports_flash_attn_2 = True
        self._supports_sdpa = False

    def get_text_tokenizer(self):
        return self.text_tokenizer

    def get_visual_tokenizer(self):
        return self.visual_tokenizer

    def tie_weights(self):
        if not self.config.disable_tie_weight:
            self.get_llm().tie_weights()

    def get_llm(self):
        return self.llm

    def get_vte(self):
        return self.vte

    def get_wte(self):
        return self.llm.get_input_embeddings()

    def get_conversation_formatter(self) -> ConversationFormatter:
        if getattr(self, 'conversation_formatter', None) is None:
            self.conversation_formatter = getattr(import_module(".configuration_ovis", __package__),
                                                  self.config.conversation_formatter_class)(self.text_tokenizer)
        return self.conversation_formatter

    def forward(
            self,
            input_ids: torch.Tensor,
            attention_mask: torch.Tensor,
            labels: Optional[torch.Tensor],
            pixel_values: List[Optional[torch.Tensor]],
            **kwargs
    ):
        assert self.training, "`forward` can only be used in training. For inference, use `generate`."
        _, inputs_embeds, labels, attention_mask = self.merge_multimodal(
            text_input_ids=input_ids,
            text_attention_masks=attention_mask,
            text_labels=labels,
            pixel_values=pixel_values
        )
        return self.llm(inputs_embeds=inputs_embeds, labels=labels, attention_mask=attention_mask, **kwargs)

    def merge_multimodal(
            self,
            text_input_ids: torch.Tensor,
            text_attention_masks: torch.Tensor,
            text_labels: Optional[torch.Tensor],
            pixel_values: List[Optional[torch.Tensor]],
            left_padding: bool = False
    ):
        input_device = text_input_ids.device
        visual_vocab_size = self.get_visual_tokenizer().config.vocab_size
        visual_indicator_embeds = self.get_vte()(
            torch.tensor(
                list(range(visual_vocab_size - 5, visual_vocab_size)),
                dtype=torch.long,
                device=self.get_visual_tokenizer().device
            )
        ).to(device=input_device)

        if self.training:
            # When training, to be compatible with deepspeed zero, each sample has to include a pixel_values tensor.
            # A text-only sample can simply use a full zero tensor as pixel_values, which will be ignored
            # (see below in this function), so the gradient is not affected.
            num_images = [x.shape[0] for x in pixel_values]
            visual_tokens = self.visual_tokenizer(torch.cat([x for x in pixel_values], dim=0))
            visual_embeds = torch.split(self.get_vte()(visual_tokens).to(dtype=self.dtype, device=input_device),
                                        split_size_or_sections=num_images, dim=0)
            visual_input_ids = torch.split(torch.argmax(visual_tokens, dim=-1).to(device=input_device),
                                           split_size_or_sections=num_images, dim=0)
            visual_labels = [torch.full(x.shape, IGNORE_ID, dtype=torch.long, device=input_device) for x in
                             visual_input_ids]
        else:
            # At inference, a sample may be text-only, with `None` as its pixel_values
            num_images = [x.shape[0] if x is not None else 0 for x in pixel_values]
            if sum(num_images) > 0:
                visual_tokens = self.visual_tokenizer(torch.cat([x for x in pixel_values if x is not None], dim=0))
                visual_embeds = torch.split(self.get_vte()(visual_tokens).to(dtype=self.dtype, device=input_device),
                                            split_size_or_sections=num_images, dim=0)
                visual_input_ids = torch.split(torch.argmax(visual_tokens, dim=-1).to(device=input_device),
                                               split_size_or_sections=num_images, dim=0)
                visual_labels = [torch.full(x.shape, IGNORE_ID, dtype=torch.long, device=input_device) for x in
                                 visual_input_ids]
            else:
                # just placeholders
                visual_embeds = [None] * len(num_images)
                visual_input_ids = [None] * len(num_images)
                visual_labels = [None] * len(num_images)
            if text_labels is None:
                text_labels = torch.full(text_input_ids.shape, IGNORE_ID, dtype=torch.long, device=input_device)

        input_embeds = []
        attention_masks = []
        labels = []
        for text_input_id, text_label, text_attention_mask, visual_embed, visual_input_id, visual_label in zip(
                text_input_ids, text_labels, text_attention_masks, visual_embeds, visual_input_ids, visual_labels
        ):
            placeholder_token_mask = torch.lt(text_input_id, 0)
            text_embed = self.get_wte()(torch.masked_fill(text_input_id, placeholder_token_mask, 0))
            for i, indicator_id in enumerate(IMAGE_INDICATOR_IDS):
                text_embed[text_input_id == indicator_id] = visual_indicator_embeds[i]
            image_atom_positions = torch.where(torch.eq(text_input_id, IMAGE_ATOM_ID))[0].tolist()
            if len(image_atom_positions) > 0:
                input_embed_parts = []
                attention_mask_parts = []
                label_parts = []
                prev_image_atom_position = -1
                for index, image_atom_position in enumerate(image_atom_positions):
                    input_embed_parts.append(
                        text_embed[prev_image_atom_position + 1:image_atom_position, :])
                    label_parts.append(
                        text_label[prev_image_atom_position + 1:image_atom_position])
                    attention_mask_parts.append(
                        text_attention_mask[prev_image_atom_position + 1:image_atom_position])
                    input_embed_parts.append(visual_embed[index])
                    attention_mask_parts.append(
                        torch.ones_like(visual_label[index], dtype=torch.bool))
                    label_parts.append(visual_label[index])
                    prev_image_atom_position = image_atom_position
                if prev_image_atom_position + 1 < text_input_id.shape[0]:
                    input_embed_parts.append(
                        text_embed[prev_image_atom_position + 1:, :])
                    attention_mask_parts.append(
                        text_attention_mask[prev_image_atom_position + 1:])
                    label_parts.append(
                        text_label[prev_image_atom_position + 1:])
                input_embed = torch.cat(input_embed_parts, dim=0)
                attention_mask = torch.cat(attention_mask_parts, dim=0)
                label = torch.cat(label_parts, dim=0)
            else:
                input_embed = text_embed
                attention_mask = text_attention_mask
                label = text_label
            if self.training:
                # Make visual_embed & visual_indicator_embeds involved in the backward graph,
                # to be compatible with deepspeed zero and ddp.
                input_embed += torch.sum(visual_embed * 0.0) + torch.sum(visual_indicator_embeds * 0.0)
            input_embeds.append(input_embed)
            attention_masks.append(attention_mask)
            labels.append(label)

        if self.training:  # pad to self.config.multimodal_max_length for increased training speed
            padding_size = max(0, self.config.multimodal_max_length - len(input_embeds[0]))
            input_embeds[0] = torch.nn.ConstantPad2d((0, 0, 0, padding_size), 0.0)(input_embeds[0])
            attention_masks[0] = torch.nn.ConstantPad1d((0, padding_size), False)(attention_masks[0])
            labels[0] = torch.nn.ConstantPad1d((0, padding_size), IGNORE_ID)(labels[0])
        batch_input_embeds = self.pad_truncate_sequence(input_embeds, batch_first=True, padding_value=0.0, left_padding=left_padding)
        batch_attention_mask = self.pad_truncate_sequence(attention_masks, batch_first=True, padding_value=False, left_padding=left_padding)
        batch_labels = self.pad_truncate_sequence(labels, batch_first=True, padding_value=IGNORE_ID, left_padding=left_padding)

        return visual_input_ids, batch_input_embeds, batch_labels, batch_attention_mask

    def pad_truncate_sequence(self, sequences: List[torch.Tensor], batch_first: bool = True, padding_value: float = 0.0, left_padding: bool = False) -> torch.Tensor:
        if not left_padding:
            pad_sequence = torch.nn.utils.rnn.pad_sequence(sequences, batch_first=batch_first, padding_value=padding_value)
            return pad_sequence[:, :self.config.multimodal_max_length]
        else:
            pad_sequence = torch.nn.utils.rnn.pad_sequence([i.flip(dims=[0]) for i in sequences], batch_first=True, padding_value=padding_value).flip(dims=[1])
            return pad_sequence[:, -self.config.multimodal_max_length:]

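    # Example of the truncation behavior above: with multimodal_max_length = 4, right padding
    # (training) keeps positions [0:4] of each padded row, while left padding (used by `generate`)
    # flips, pads, flips back and keeps the last 4 positions, so the most recent context survives.
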
    def preprocess_inputs(
            self,
            text_or_conversations: Union[List[Dict], str],
            images: Optional[List[PIL.Image.Image]],
            max_partition=9,
            generation_preface='',
            return_labels=False,
            propagate_exception=True
    ):
        # convert text to conversations
        if isinstance(text_or_conversations, str):
            conversations = [{
                "from": "human",
                "value": text_or_conversations
            }]
        elif isinstance(text_or_conversations, list):
            conversations = text_or_conversations
        else:
            raise ValueError(f'Invalid type of `text_or_conversations`, expected `List[Dict]` or `str`,'
                             f' but got {type(text_or_conversations)}')

        # format conversations
        prompt, raw_input_ids, raw_labels = self.get_conversation_formatter().format(
            conversations, generation_preface=generation_preface)

        # place image placeholders
        input_ids = []
        labels = []
        pixel_values = []
        invalidate_label = False
        image_token_indices = [i for i, v in enumerate(raw_input_ids) if v == IMAGE_TOKEN_ID]
        last_image_token_index = -1
        for i in range(len(image_token_indices)):
            head = 0 if i == 0 else image_token_indices[i - 1] + 1
            tail = image_token_indices[i]
            last_image_token_index = tail
            input_ids.extend(raw_input_ids[head:tail])
            labels.extend(raw_labels[head:tail])
            try:
                image = images[i]
                raw_pixel_values, image_placeholders = self.visual_tokenizer.preprocess_image(
                    image, max_partition=max_partition)
            except Exception as e:
                if propagate_exception:
                    raise e
                logging.exception(e)
                invalidate_label = True
                raw_pixel_values, image_placeholders = self.visual_tokenizer.mock_input()
            input_ids.extend(image_placeholders)
            labels.extend([IGNORE_ID] * len(image_placeholders))
            pixel_values.append(raw_pixel_values)
        input_ids.extend(raw_input_ids[last_image_token_index + 1:])
        labels.extend(raw_labels[last_image_token_index + 1:])

        # return tensors
        input_ids = torch.tensor(input_ids, dtype=torch.long)
        labels = torch.tensor([IGNORE_ID] * len(labels) if invalidate_label else labels, dtype=torch.long)
        pixel_values = torch.cat(pixel_values, dim=0) if len(pixel_values) > 0 else None

        if return_labels:
            return prompt, input_ids, pixel_values, labels
        else:
            return prompt, input_ids, pixel_values

    def save_pretrained(
            self,
            save_directory: Union[str, os.PathLike],
            is_main_process: bool = True,
            state_dict: Optional[dict] = None,
            save_function: Callable = torch.save,
            push_to_hub: bool = False,
            max_shard_size: Union[int, str] = "5GB",
            safe_serialization: bool = True,
            variant: Optional[str] = None,
            token: Optional[Union[str, bool]] = None,
            save_peft_format: bool = True,
            **kwargs
    ):
        super().save_pretrained(save_directory,
                                is_main_process=is_main_process,
                                state_dict=state_dict,
                                save_function=save_function,
                                safe_serialization=safe_serialization)
        self.get_text_tokenizer().save_pretrained(save_directory)
        self.get_visual_tokenizer().get_image_processor().save_pretrained(save_directory)

    def _get_hybrid_cache_for_llm(self, batch_size: int, max_cache_len: int):
        cache_cls = HybridCache
        llm = self.get_llm()

        need_new_cache = (
            not hasattr(llm, "_cache")
            or (not isinstance(llm._cache, cache_cls))
            or llm._cache.batch_size != batch_size
            or llm._cache.max_cache_len < max_cache_len
        )

        if need_new_cache:
            if hasattr(llm.config, "_pre_quantization_dtype"):
                cache_dtype = llm.config._pre_quantization_dtype
            else:
                cache_dtype = llm.dtype
            llm._cache = cache_cls(
                config=llm.config,
                batch_size=batch_size,
                max_cache_len=max_cache_len,
                device=llm.device,
                dtype=cache_dtype,
            )
        else:
            llm._cache.reset()
        return llm._cache

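    # `generate` below builds multimodal `inputs_embeds` via `merge_multimodal` (with left
    # padding, so any truncation drops the oldest positions) and then delegates to the wrapped
    # LLM's `generate` with `inputs_embeds` instead of `input_ids`.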
    # TODO: support batch generation
    def generate(
            self,
            inputs: Optional[torch.Tensor] = None,
            **kwargs
    ) -> Union[GenerateOutput, torch.LongTensor]:
        _, inputs_embeds, labels, attention_mask = self.merge_multimodal(
            text_input_ids=inputs,
            text_attention_masks=kwargs.pop('attention_mask'),
            text_labels=None,
            pixel_values=kwargs.pop('pixel_values'),
            left_padding=True
        )
        if getattr(self.generation_config, 'cache_implementation', None) == 'hybrid':  # mainly for Gemma2
            kwargs['past_key_values'] = self._get_hybrid_cache_for_llm(
                kwargs.get("num_beams", inputs_embeds.shape[0]), kwargs['max_new_tokens'] + inputs_embeds.shape[-2])
            self.get_llm()._supports_cache_class = True
            kwargs['cache_implementation'] = None

        return self.llm.generate(inputs=None, inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs)
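
For context, a minimal usage sketch for the `Ovis` class defined in this file, mirroring the `preprocess_inputs` / `generate` flow above. The checkpoint id, image path, and generation settings are placeholders, and the attention-mask line assumes the text tokenizer defines a pad token; adjust everything to your checkpoint and hardware.

import torch
import PIL.Image
from transformers import AutoModelForCausalLM

# "<ovis-checkpoint>" and "example.jpg" are placeholders, not real artifacts.
model = AutoModelForCausalLM.from_pretrained(
    "<ovis-checkpoint>",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
).cuda()
text_tokenizer = model.get_text_tokenizer()

image = PIL.Image.open("example.jpg")
# The "<image>" placeholder is resolved to IMAGE_TOKEN_ID by the conversation formatter
# defined in configuration_ovis.
query = "<image>\nDescribe this image."

# preprocess_inputs returns (prompt, input_ids, pixel_values); labels are only needed for training.
prompt, input_ids, pixel_values = model.preprocess_inputs(query, [image], max_partition=9)
attention_mask = torch.ne(input_ids, text_tokenizer.pad_token_id)

with torch.inference_mode():
    output_ids = model.generate(
        input_ids.unsqueeze(0).to(device=model.device),
        attention_mask=attention_mask.unsqueeze(0).to(device=model.device),
        pixel_values=[pixel_values.to(dtype=model.visual_tokenizer.dtype, device=model.visual_tokenizer.device)],
        max_new_tokens=256,
        do_sample=False,
    )
print(text_tokenizer.decode(output_ids[0], skip_special_tokens=True))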