Guilherme34 committed
Commit 040782e · verified · 1 Parent(s): 94ceadf

Upload configuration_minicpm.py with huggingface_hub
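The commit message notes that the file was pushed with the huggingface_hub client. As a rough sketch (not the exact command used for this commit), a single-file upload of this kind typically looks like the following; the repo id and local path are placeholders:

from huggingface_hub import HfApi

api = HfApi()
# Upload one local file into a Hub repository; this creates a new commit on the target branch.
api.upload_file(
    path_or_fileobj="configuration_minicpm.py",  # local file to push
    path_in_repo="configuration_minicpm.py",     # destination path inside the repo
    repo_id="Guilherme34/<repo-name>",           # hypothetical repo id
    commit_message="Upload configuration_minicpm.py with huggingface_hub",
)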

Files changed (1)
  1. configuration_minicpm.py +210 -0
configuration_minicpm.py ADDED
@@ -0,0 +1,210 @@
+# coding=utf-8
+# Copyright 2025 The OpenBMB Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from typing import Union
+
+from transformers import PretrainedConfig
+from transformers import Qwen2Config
+from transformers import WhisperConfig
+from transformers.utils import logging
+
+from .modeling_navit_siglip import SiglipVisionConfig
+
+logger = logging.get_logger(__name__)
+
+
+class MiniCPMVSliceConfig(PretrainedConfig):
+    model_type = "minicpmv"
+
+    def __init__(
+        self,
+        patch_size=14,
+        max_slice_nums=9,
+        scale_resolution=448,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        self.patch_size = patch_size
+        self.max_slice_nums = max_slice_nums
+        self.scale_resolution = scale_resolution
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+        cls._set_token_in_kwargs(kwargs)
+
+        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+        if config_dict.get("model_type") == "minicpmv":
+            config_dict = config_dict["slice_config"]
+
+        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+            logger.warning(
+                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+            )
+
+        return cls.from_dict(config_dict, **kwargs)
+
+
+class ConditionalChatTTSConfig(PretrainedConfig):
+    model_type = "conditional_chattts"
+
+    def __init__(
+        self,
+        llm_dim: int = 2560,
+        hidden_size: int = 768,
+        intermediate_size: int = 3072,
+        num_attention_heads: int = 12,
+        num_hidden_layers: int = 20,
+        max_position_embeddings: int = 4096,
+        num_audio_tokens: int = 626,
+        num_text_tokens: int = 21178,
+        num_mel_bins: int = 100,
+        num_vq: int = 4,
+        use_speaker_embedding: bool = True,
+        use_llm_hidden_state: bool = False,
+        spk_emb_token_id: int = 21143,
+        num_spk_embs: int = 1,
+        audio_bos_token_id: int = 21132,
+        text_eos_token_id: int = 21133,
+        use_text: bool = True,
+        streaming: bool = True,
+        streaming_text_chunk_size: int = 10,
+        streaming_text_reserved_len: int = 300,
+        streaming_audio_chunk_size: int = 50,
+        attn_implementation: str = "sdpa",
+        use_mlp: bool = True,
+        aug_loss_weight: bool = True,
+        do_sample: bool = True,
+        top_p: float = 0.7,
+        top_k: int = 20,
+        repetition_penalty: float = 1.0,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.llm_dim = llm_dim
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_attention_heads = num_attention_heads
+        self.num_hidden_layers = num_hidden_layers
+        self.max_position_embeddings = max_position_embeddings
+        self.num_audio_tokens = num_audio_tokens
+        self.num_text_tokens = num_text_tokens
+        self.num_mel_bins = num_mel_bins
+        self.num_vq = num_vq
+        self.use_speaker_embedding = use_speaker_embedding
+        self.use_llm_hidden_state = use_llm_hidden_state
+        self.spk_emb_token_id = spk_emb_token_id
+        self.num_spk_embs = num_spk_embs
+        self.audio_bos_token_id = audio_bos_token_id
+        self.text_eos_token_id = text_eos_token_id
+        self.use_text = use_text
+        self.streaming = streaming
+        self.streaming_text_chunk_size = streaming_text_chunk_size
+        self.streaming_text_reserved_len = streaming_text_reserved_len
+        self.streaming_audio_chunk_size = streaming_audio_chunk_size
+        self.attn_implementation = attn_implementation
+        self.use_mlp = use_mlp
+        self.aug_loss_weight = aug_loss_weight
+        self.do_sample = do_sample
+        self.top_p = top_p
+        self.top_k = top_k
+        self.repetition_penalty = repetition_penalty
+
+
+class MiniCPMOConfig(Qwen2Config):
+    model_type = "minicpmo"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    default_vision_config = {
+        "hidden_size": 1152,
+        "image_size": 980,
+        "intermediate_size": 4304,
+        "model_type": "siglip",
+        "num_attention_heads": 16,
+        "num_hidden_layers": 27,
+        "patch_size": 14,
+    }
+
+    def __init__(
+        self,
+        use_cache=True,
+        query_num=64,
+        image_size=448,
+        drop_vision_last_layer=True,
+        batch_vision_input=True,
+        slice_config=None,
+        vision_config=None,
+        audio_config=None,
+        tts_config=None,
+        use_image_id=True,
+        vision_batch_size=16,
+        audio_pool_step=2,
+        audio_chunk_length=1.0,
+        stream_input=False,
+        init_vision=True,
+        init_audio=True,
+        init_tts=True,
+        **kwargs,
+    ):
+        self.use_cache = use_cache
+        self.query_num = query_num
+        self.image_size = image_size
+        self.drop_vision_last_layer = drop_vision_last_layer
+        self.batch_vision_input = batch_vision_input
+        self.use_image_id = use_image_id
+        self.vision_batch_size = vision_batch_size
+        self.audio_pool_step = audio_pool_step
+        self.audio_chunk_length = audio_chunk_length
+        self.stream_input = stream_input
+        self.init_vision = init_vision
+        self.init_audio = init_audio
+        self.init_tts = init_tts
+
+        if slice_config is None:
+            self.slice_config = MiniCPMVSliceConfig(max_slice_nums=1)
+        else:
+            self.slice_config = MiniCPMVSliceConfig(**slice_config)
+        self.slice_mode = True
+
+        # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit add tgt_sizes
+        if vision_config is None:
+            self.vision_config = SiglipVisionConfig(**self.default_vision_config)
+            logger.info("vision_config is None, using default vision config")
+        elif isinstance(vision_config, dict):
+            self.vision_config = SiglipVisionConfig(**vision_config)
+        elif isinstance(vision_config, SiglipVisionConfig):
+            self.vision_config = vision_config
+
+        # same as openai/whisper-medium add use_cache
+        if audio_config is None:
+            self.audio_config = WhisperConfig()
+        elif isinstance(audio_config, dict):
+            self.audio_config = WhisperConfig(**audio_config)
+        elif isinstance(audio_config, WhisperConfig):
+            self.audio_config = audio_config
+
+        if tts_config is None:
+            self.tts_config = ConditionalChatTTSConfig()
+        elif isinstance(tts_config, dict):
+            self.tts_config = ConditionalChatTTSConfig(**tts_config)
+        elif isinstance(tts_config, ConditionalChatTTSConfig):
+            self.tts_config = tts_config
+
+        self.patch_size = self.vision_config.patch_size
+
+        super().__init__(**kwargs)
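Once this file sits in a model repository (together with the referenced modeling_navit_siglip.py and a config.json whose auto_map points at MiniCPMOConfig), it is normally consumed through transformers with remote code enabled. A minimal, hypothetical loading sketch; the repo id is a placeholder, and the noted defaults apply only where config.json does not override them:

from transformers import AutoConfig

# trust_remote_code tells transformers to import configuration_minicpm.py from the repo
config = AutoConfig.from_pretrained("Guilherme34/<repo-name>", trust_remote_code=True)

print(config.model_type)                   # "minicpmo"
print(config.query_num)                    # 64 by default
print(config.slice_config.max_slice_nums)  # MiniCPMVSliceConfig sub-config
print(config.tts_config.num_vq)            # ConditionalChatTTSConfig sub-config, 4 by default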