Xenova (HF staff) committed
Commit 88f839d · verified · 1 parent: ee484f7

Delete configuration_florence2.py

Files changed (1)
  1. configuration_florence2.py +0 -340
configuration_florence2.py DELETED
@@ -1,340 +0,0 @@
- # coding=utf-8
- # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Florence-2 configuration"""
-
- import warnings
- from typing import Optional
-
- from transformers import AutoConfig
- from transformers.configuration_utils import PretrainedConfig
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
- class Florence2VisionConfig(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to
-     instantiate a Florence2VisionModel according to the specified arguments, defining the model architecture.
-     Instantiating a configuration with the defaults will yield a similar configuration to that of the
-     Florence2VisionModel architecture.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-     Args:
-         drop_path_rate (`float`, *optional*, defaults to 0.1):
-             The dropout rate of the drop path layer.
-         patch_size (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
-             The patch size of the patch embedding in each stage.
-         patch_stride (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
-             The stride of the patch embedding in each stage.
-         patch_padding (`List[int]`, *optional*, defaults to `[3, 1, 1, 1]`):
-             The padding of the patch embedding in each stage.
-         patch_prenorm (`List[bool]`, *optional*, defaults to `[False, True, True, True]`):
-             Whether to apply layer normalization before the patch embedding in each stage.
-         enable_checkpoint (`bool`, *optional*, defaults to `False`):
-             Whether to enable gradient checkpointing.
-         dim_embed (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
-             The embedding dimension of each stage.
-         num_heads (`List[int]`, *optional*, defaults to `[8, 16, 32, 64]`):
-             The number of attention heads in each stage.
-         num_groups (`List[int]`, *optional*, defaults to `[8, 16, 32, 64]`):
-             The number of groups in each stage.
-         depths (`List[int]`, *optional*, defaults to `[1, 1, 9, 1]`):
-             The number of blocks in each stage.
-         window_size (`int`, *optional*, defaults to 12):
-             The window size used by windowed attention.
-         projection_dim (`int`, *optional*, defaults to 1024):
-             The dimension of the projection layer.
-         visual_temporal_embedding (`dict`, *optional*):
-             The configuration of the visual temporal embedding.
-         image_pos_embed (`dict`, *optional*):
-             The configuration of the image position embedding.
-         image_feature_source (`List[str]`, *optional*, defaults to `["spatial_avg_pool", "temporal_avg_pool"]`):
-             The source of the image feature.
-
-     Example:
-
-     ```python
-     >>> from transformers import Florence2VisionConfig, Florence2VisionModel
-
-     >>> # Initializing a Florence2 Vision style configuration
-     >>> configuration = Florence2VisionConfig()
-
-     >>> # Initializing a model (with random weights)
-     >>> model = Florence2VisionModel(configuration)
-
-     >>> # Accessing the model configuration
-     >>> configuration = model.config
-     ```"""
-
-     model_type = "florence2_vision"
-     keys_to_ignore_at_inference = ["past_key_values"]
-
-     def __init__(
-         self,
-         drop_path_rate=0.1,
-         patch_size=[7, 3, 3, 3],
-         patch_stride=[4, 2, 2, 2],
-         patch_padding=[3, 1, 1, 1],
-         patch_prenorm=[False, True, True, True],
-         enable_checkpoint=False,
-         dim_embed=[256, 512, 1024, 2048],
-         num_heads=[8, 16, 32, 64],
-         num_groups=[8, 16, 32, 64],
-         depths=[1, 1, 9, 1],
-         window_size=12,
-         projection_dim=1024,
-         visual_temporal_embedding=None,
-         image_pos_embed=None,
-         image_feature_source=["spatial_avg_pool", "temporal_avg_pool"],
-         **kwargs,
-     ):
-         self.drop_path_rate = drop_path_rate
-         self.patch_size = patch_size
-         self.patch_stride = patch_stride
-         self.patch_padding = patch_padding
-         self.patch_prenorm = patch_prenorm
-         self.enable_checkpoint = enable_checkpoint
-         self.dim_embed = dim_embed
-         self.num_heads = num_heads
-         self.num_groups = num_groups
-         self.depths = depths
-         self.window_size = window_size
-         self.projection_dim = projection_dim
-         self.visual_temporal_embedding = visual_temporal_embedding
-         self.image_pos_embed = image_pos_embed
-         self.image_feature_source = image_feature_source
-
-         super().__init__(**kwargs)
-
-
- class Florence2LanguageConfig(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`Florence2LanguagePreTrainedModel`]. It is used
-     to instantiate a BART model according to the specified arguments, defining the model architecture. Instantiating
-     a configuration with the defaults will yield a similar configuration to that of the BART
-     [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-     Args:
-         vocab_size (`int`, *optional*, defaults to 51289):
-             Vocabulary size of the Florence2Language model. Defines the number of different tokens that can be
-             represented by the `inputs_ids` passed when calling [`Florence2LanguageModel`].
-         d_model (`int`, *optional*, defaults to 1024):
-             Dimensionality of the layers and the pooler layer.
-         encoder_layers (`int`, *optional*, defaults to 12):
-             Number of encoder layers.
-         decoder_layers (`int`, *optional*, defaults to 12):
-             Number of decoder layers.
-         encoder_attention_heads (`int`, *optional*, defaults to 16):
-             Number of attention heads for each attention layer in the Transformer encoder.
-         decoder_attention_heads (`int`, *optional*, defaults to 16):
-             Number of attention heads for each attention layer in the Transformer decoder.
-         decoder_ffn_dim (`int`, *optional*, defaults to 4096):
-             Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
-         encoder_ffn_dim (`int`, *optional*, defaults to 4096):
-             Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
-         activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
-             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
-             `"relu"`, `"silu"` and `"gelu_new"` are supported.
-         dropout (`float`, *optional*, defaults to 0.1):
-             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-         attention_dropout (`float`, *optional*, defaults to 0.0):
-             The dropout ratio for the attention probabilities.
-         activation_dropout (`float`, *optional*, defaults to 0.0):
-             The dropout ratio for activations inside the fully connected layer.
-         classifier_dropout (`float`, *optional*, defaults to 0.0):
-             The dropout ratio for the classifier.
-         max_position_embeddings (`int`, *optional*, defaults to 1024):
-             The maximum sequence length that this model might ever be used with. Typically set this to something
-             large just in case (e.g., 512 or 1024 or 2048).
-         init_std (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         encoder_layerdrop (`float`, *optional*, defaults to 0.0):
-             The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
-             for more details.
-         decoder_layerdrop (`float`, *optional*, defaults to 0.0):
-             The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
-             for more details.
-         scale_embedding (`bool`, *optional*, defaults to `False`):
-             Scale embeddings by a factor of sqrt(d_model) if `True`.
-         use_cache (`bool`, *optional*, defaults to `True`):
-             Whether or not the model should return the last key/values attentions (not used by all models).
-         num_labels (`int`, *optional*, defaults to 3):
-             The number of labels to use in [`Florence2LanguageForSequenceClassification`].
-         forced_eos_token_id (`int`, *optional*, defaults to 2):
-             The id of the token to force as the last generated token when `max_length` is reached. Usually set to
-             `eos_token_id`.
-
-     Example:
-
-     ```python
-     >>> from transformers import Florence2LanguageConfig, Florence2LanguageModel
-
-     >>> # Initializing a Florence2 Language style configuration
-     >>> configuration = Florence2LanguageConfig()
-
-     >>> # Initializing a model (with random weights)
-     >>> model = Florence2LanguageModel(configuration)
-
-     >>> # Accessing the model configuration
-     >>> configuration = model.config
-     ```"""
-
-     model_type = "florence2_language"
-     keys_to_ignore_at_inference = ["past_key_values"]
-     attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
-
-     def __init__(
-         self,
-         vocab_size=51289,
-         max_position_embeddings=1024,
-         encoder_layers=12,
-         encoder_ffn_dim=4096,
-         encoder_attention_heads=16,
-         decoder_layers=12,
-         decoder_ffn_dim=4096,
-         decoder_attention_heads=16,
-         encoder_layerdrop=0.0,
-         decoder_layerdrop=0.0,
-         activation_function="gelu",
-         d_model=1024,
-         dropout=0.1,
-         attention_dropout=0.0,
-         activation_dropout=0.0,
-         init_std=0.02,
-         classifier_dropout=0.0,
-         scale_embedding=False,
-         use_cache=True,
-         num_labels=3,
-         pad_token_id=1,
-         bos_token_id=0,
-         eos_token_id=2,
-         is_encoder_decoder=True,
-         decoder_start_token_id=2,
-         forced_eos_token_id=2,
-         **kwargs,
-     ):
-         self.vocab_size = vocab_size
-         self.max_position_embeddings = max_position_embeddings
-         self.d_model = d_model
-         self.encoder_ffn_dim = encoder_ffn_dim
-         self.encoder_layers = encoder_layers
-         self.encoder_attention_heads = encoder_attention_heads
-         self.decoder_ffn_dim = decoder_ffn_dim
-         self.decoder_layers = decoder_layers
-         self.decoder_attention_heads = decoder_attention_heads
-         self.dropout = dropout
-         self.attention_dropout = attention_dropout
-         self.activation_dropout = activation_dropout
-         self.activation_function = activation_function
-         self.init_std = init_std
-         self.encoder_layerdrop = encoder_layerdrop
-         self.decoder_layerdrop = decoder_layerdrop
-         self.classifier_dropout = classifier_dropout
-         self.use_cache = use_cache
-         self.num_hidden_layers = encoder_layers
-         self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
-
-         super().__init__(
-             num_labels=num_labels,
-             pad_token_id=pad_token_id,
-             bos_token_id=bos_token_id,
-             eos_token_id=eos_token_id,
-             is_encoder_decoder=is_encoder_decoder,
-             decoder_start_token_id=decoder_start_token_id,
-             forced_eos_token_id=forced_eos_token_id,
-             **kwargs,
-         )
-
-         # ensure backward compatibility for BART CNN models
-         if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
-             self.forced_bos_token_id = self.bos_token_id
-             warnings.warn(
-                 f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
-                 "The config can simply be saved and uploaded again to be fixed."
-             )
-
-
- class Florence2Config(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is
-     used to instantiate a Florence-2 model according to the specified arguments, defining the model architecture.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-     Args:
-         vision_config (`Florence2VisionConfig`, *optional*):
-             Custom vision config or dict.
-         text_config (`Union[AutoConfig, dict]`, *optional*):
-             The config object of the text backbone.
-         ignore_index (`int`, *optional*, defaults to -100):
-             The ignore index for the loss function.
-         vocab_size (`int`, *optional*, defaults to 51289):
-             Vocabulary size of the Florence-2 model. Defines the number of different tokens that can be represented
-             by the `inputs_ids` passed when calling [`~Florence2ForConditionalGeneration`].
-         projection_dim (`int`, *optional*, defaults to 1024):
-             Dimension of the multimodal projection space.
-
-     Example:
-
-     ```python
-     >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig
-
-     >>> # Initializing a CLIP-like vision config
-     >>> vision_config = CLIPVisionConfig()
-
-     >>> # Initializing a BART config
-     >>> text_config = BartConfig()
-
-     >>> # Initializing a Florence-2 configuration (the sub-configs are passed as dicts)
-     >>> configuration = Florence2Config(vision_config.to_dict(), text_config.to_dict())
-
-     >>> # Initializing a model from the Florence-2 configuration
-     >>> model = Florence2ForConditionalGeneration(configuration)
-
-     >>> # Accessing the model configuration
-     >>> configuration = model.config
-     ```"""
-
-     model_type = "florence2"
-     is_composition = False
-
-     def __init__(
-         self,
-         vision_config=None,
-         text_config=None,
-         ignore_index=-100,
-         vocab_size=51289,
-         projection_dim=1024,
-         **kwargs,
-     ):
-         self.ignore_index = ignore_index
-         self.vocab_size = vocab_size
-         self.projection_dim = projection_dim
-
-         self.vision_config = vision_config
-         if vision_config is not None:
-             self.vision_config = Florence2VisionConfig(**vision_config)
-
-         self.text_config = text_config
-         if text_config is not None:
-             self.text_config = Florence2LanguageConfig(**text_config)
-
-         super().__init__(**kwargs)
-
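For reference, the sketch below shows how the three deleted classes composed: `Florence2Config` wraps a `Florence2VisionConfig` and a `Florence2LanguageConfig` built from plain dicts, mirroring the nested layout a `config.json` for this model would hold. This is a minimal illustration, not an official recipe; it assumes the file above is still saved locally as `configuration_florence2.py`, and the values shown are simply the defaults defined in the file.

```python
# A minimal sketch, assuming the deleted module is saved locally as
# configuration_florence2.py. All values mirror the defaults defined above.
from configuration_florence2 import Florence2Config

config = Florence2Config(
    vision_config={
        "dim_embed": [256, 512, 1024, 2048],  # per-stage embedding dims
        "num_heads": [8, 16, 32, 64],         # per-stage attention heads
        "depths": [1, 1, 9, 1],               # blocks per stage
        "projection_dim": 1024,
    },
    text_config={
        "vocab_size": 51289,                  # matches the top-level default
        "d_model": 1024,
        "encoder_layers": 12,
        "decoder_layers": 12,
    },
)

print(config.model_type)                # "florence2"
print(config.vision_config.model_type)  # "florence2_vision"
print(config.text_config.model_type)    # "florence2_language"
```

Each sub-config dict is promoted to its config class in `Florence2Config.__init__`, so unspecified keys fall back to the defaults shown in the file.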