Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +6 -0
- LICENSE +335 -0
- Qwen-VL-Chat/.gitattributes +37 -0
- Qwen-VL-Chat/README.md +727 -0
- Qwen-VL-Chat/SimSun.ttf +3 -0
- Qwen-VL-Chat/config.json +49 -0
- Qwen-VL-Chat/configuration_qwen.py +65 -0
- Qwen-VL-Chat/generation_config.json +11 -0
- Qwen-VL-Chat/modeling_qwen.py +1162 -0
- Qwen-VL-Chat/pytorch_model-00001-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model-00002-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model-00003-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model-00004-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model-00005-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model-00006-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model-00007-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model-00008-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model-00009-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model-00010-of-00010.bin +3 -0
- Qwen-VL-Chat/pytorch_model.bin.index.json +860 -0
- Qwen-VL-Chat/qwen.tiktoken +0 -0
- Qwen-VL-Chat/qwen_generation_utils.py +420 -0
- Qwen-VL-Chat/tokenization_qwen.py +598 -0
- Qwen-VL-Chat/tokenizer_config.json +10 -0
- Qwen-VL-Chat/visual.py +426 -0
- QwenViT/qwen_vit_G.pt +3 -0
- SEED-X-17B/README.md +105 -0
- cvlm_llama2_tokenizer_100img_and_224loc_addpatch/added_tokens.json +332 -0
- cvlm_llama2_tokenizer_100img_and_224loc_addpatch/special_tokens_map.json +24 -0
- cvlm_llama2_tokenizer_100img_and_224loc_addpatch/tokenizer.model +3 -0
- cvlm_llama2_tokenizer_100img_and_224loc_addpatch/tokenizer_config.json +34 -0
- seed_detokenizer/first_stage/pytorch_model.bin +3 -0
- seed_detokenizer/second_stage/pytorch_model.bin +3 -0
- seed_x/agent/pytorch_model.bin +3 -0
- seed_x/llm/config.json +26 -0
- seed_x/llm/generation_config.json +9 -0
- seed_x/llm/pytorch_model-00001-of-00006.bin +3 -0
- seed_x/llm/pytorch_model-00002-of-00006.bin +3 -0
- seed_x/llm/pytorch_model-00003-of-00006.bin +3 -0
- seed_x/llm/pytorch_model-00004-of-00006.bin +3 -0
- seed_x/llm/pytorch_model-00005-of-00006.bin +3 -0
- seed_x/llm/pytorch_model-00006-of-00006.bin +3 -0
- seed_x/llm/pytorch_model.bin.index.json +410 -0
- seed_x_edit/agent/pytorch_model.bin +3 -0
- seed_x_edit/llm/config.json +26 -0
- seed_x_edit/llm/generation_config.json +9 -0
- seed_x_edit/llm/pytorch_model-00001-of-00006.bin +3 -0
- seed_x_edit/llm/pytorch_model-00002-of-00006.bin +3 -0
- seed_x_edit/llm/pytorch_model-00003-of-00006.bin +3 -0
- seed_x_edit/llm/pytorch_model-00004-of-00006.bin +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Qwen-VL-Chat/SimSun.ttf filter=lfs diff=lfs merge=lfs -text
+stable-diffusion-xl-base-1.0/01.png filter=lfs diff=lfs merge=lfs -text
+stable-diffusion-xl-base-1.0/comparison.png filter=lfs diff=lfs merge=lfs -text
+stable-diffusion-xl-base-1.0/text_encoder_2/model.onnx_data filter=lfs diff=lfs merge=lfs -text
+stable-diffusion-xl-base-1.0/unet/model.onnx_data filter=lfs diff=lfs merge=lfs -text
+stable-diffusion-xl-base-1.0/unet/openvino_model.xml filter=lfs diff=lfs merge=lfs -text
LICENSE
ADDED
@@ -0,0 +1,335 @@
Tencent is pleased to support the open source community by making Seed-X available.

Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.

Seed-X is licensed under the Apache License Version 2.0 except for the third-party components listed below.


Terms of the Apache License Version 2.0:
--------------------------------------------------------------------
Apache License

Version 2.0, January 2004

http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

You must give any other recipients of the Work or Derivative Works a copy of this License; and

You must cause any modified files to carry prominent notices stating that You changed the files; and

You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS


Other dependencies and licenses:


Open Source Software Licensed under the Apache License Version 2.0:
--------------------------------------------------------------------
1. transformers
Copyright 2018- The Hugging Face team. All rights reserved.
Source code of this software can be obtained from: https://github.com/huggingface/transformers/blob/v4.30.2/

2. diffusers
Copyright 2023 The HuggingFace Team. All rights reserved.
Source code of this software can be obtained from: https://github.com/huggingface/diffusers/blob/v0.25.0/

A copy of Apache 2.0 has been included in this file.


Open Source Software Licensed under the BSD 3-Clause License:
--------------------------------------------------------------------
1. torchvision
Copyright (c) Soumith Chintala 2016,
All rights reserved.

Terms of the BSD 3-Clause License:
--------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


Open Source Software Licensed under the BSD 3-Clause License and Other Licenses of the Third-Party Components therein:
--------------------------------------------------------------------
1. numpy
Copyright (c) 2005-2021, NumPy Developers.
All rights reserved.

A copy of the BSD 3-Clause License is included in this file.

For the license of other third party components, please refer to the following URL:
https://github.com/numpy/numpy/blob/v1.20.1/LICENSES_bundled.txt


Open Source Software Licensed under the BSD 3-Clause License and Other Licenses of the Third-Party Components therein:
--------------------------------------------------------------------
1. torch
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)

A copy of the BSD 3-Clause License is included in this file.

For the license of other third party components, please refer to the following URL:
https://github.com/pytorch/pytorch/blob/v2.0.1/NOTICE


Open Source Software Licensed under the LLAMA 2 Community License:
--------------------------------------------------------------------
1. Llama 2
Copyright (c) Meta Platforms, Inc. All Rights Reserved.


Terms of the LLAMA 2 COMMUNITY LICENSE AGREEMENT:
--------------------------------------------------------------------
LLAMA 2 COMMUNITY LICENSE AGREEMENT
Llama 2 Version Release Date: July 18, 2023

"Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.

"Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.

"Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.

"Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.

"Llama Materials" means, collectively, Meta's proprietary Llama 2 and Documentation (and any portion thereof) made available under this Agreement.

"Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).

By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement.

1. License Rights and Redistribution.

a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.

b. Redistribution and Use.

i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party.
ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.

iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved."

iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement.

v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof).

2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.

3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.

4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.

5. Intellectual Property.

a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials.

b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.

c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.

6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.

7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.


Open Source Software Licensed under the Tongyi Qianwen LICENSE AGREEMENT:
--------------------------------------------------------------------
1. Qwen-VL
Copyright (c) Alibaba Cloud. All Rights Reserved.


Terms of the Tongyi Qianwen LICENSE AGREEMENT:
--------------------------------------------------------------------
Tongyi Qianwen LICENSE AGREEMENT

Tongyi Qianwen Release Date: August 23, 2023

By clicking to agree or by using or distributing any portion or element of the Tongyi Qianwen Materials, you will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.

1. Definitions
a. This Tongyi Qianwen LICENSE AGREEMENT (this "Agreement") shall mean the terms and conditions for use, reproduction, distribution and modification of the Materials as defined by this Agreement.
b. "We" (or "Us") shall mean Alibaba Cloud.
c. "You" (or "Your") shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Materials for any purpose and in any field of use.
d. "Third Parties" shall mean individuals or legal entities that are not under common control with Us or You.
e. "Tongyi Qianwen" shall mean the large language models (including Qwen-VL model and Qwen-VL-Chat model), and software and algorithms, consisting of trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Us.
f. "Materials" shall mean, collectively, Alibaba Cloud's proprietary Tongyi Qianwen and Documentation (and any portion thereof) made available under this Agreement.
g. "Source" form shall mean the preferred form for making modifications, including but not limited to model source code, documentation source, and configuration files.
h. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

2. Grant of Rights
You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Alibaba Cloud's intellectual property or other rights owned by Us embodied in the Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Materials.

3. Redistribution
You may reproduce and distribute copies of the Materials or derivative works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
a. You shall give any other recipients of the Materials or derivative works a copy of this Agreement;
b. You shall cause any modified files to carry prominent notices stating that You changed the files;
c. You shall retain in all copies of the Materials that You distribute the following attribution notices within a "Notice" text file distributed as a part of such copies: "Tongyi Qianwen is licensed under the Tongyi Qianwen LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved."; and
d. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such derivative works as a whole, provided Your use, reproduction, and distribution of the work otherwise complies with the terms and conditions of this Agreement.

4. Restrictions
If you are commercially using the Materials, and your product or service has more than 100 million monthly active users, You shall request a license from Us. You cannot exercise your rights under this Agreement without our express authorization.

5. Rules of use
a. The Materials may be subject to export controls or restrictions in China, the United States or other countries or regions. You shall comply with applicable laws and regulations in your use of the Materials.
b. You can not use the Materials or any output therefrom to improve any other large language model (excluding Tongyi Qianwen or derivative works thereof).

6. Intellectual Property
a. We retain ownership of all intellectual property rights in and to the Materials and derivatives made by or for Us. Conditioned upon compliance with the terms and conditions of this Agreement, with respect to any derivative works and modifications of the Materials that are made by you, you are and will be the owner of such derivative works and modifications.
b. No trademark license is granted to use the trade names, trademarks, service marks, or product names of Us, except as required to fulfill notice requirements under this Agreement or as required for reasonable and customary use in describing and redistributing the Materials.
c. If you commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against Us or any entity alleging that the Materials or any output therefrom, or any part of the foregoing, infringe any intellectual property or other right owned or licensable by you, then all licences granted to you under this Agreement shall terminate as of the date such lawsuit or other proceeding is commenced or brought.

7. Disclaimer of Warranty and Limitation of Liability

a. We are not obligated to support, update, provide training for, or develop any further version of the Tongyi Qianwen Materials or to grant any license thereto.
b. THE MATERIALS ARE PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. WE MAKE NO WARRANTY AND ASSUME NO RESPONSIBILITY FOR THE SAFETY OR STABILITY OF THE MATERIALS AND ANY OUTPUT THEREFROM.
c. IN NO EVENT SHALL WE BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE MATERIALS OR ANY OUTPUT OF IT, NO MATTER HOW IT'S CAUSED.
d. You will defend, indemnify and hold harmless Us from and against any claim by any third party arising out of or related to your use or distribution of the Materials.

8. Survival and Termination.
a. The term of this Agreement shall commence upon your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
b. We may terminate this Agreement if you breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, you must delete and cease use of the Materials. Sections 7 and 9 shall survive the termination of this Agreement.

9. Governing Law and Jurisdiction.
a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
b. The People's Courts in Hangzhou City shall have exclusive jurisdiction over any dispute arising out of this Agreement.
Qwen-VL-Chat/.gitattributes
ADDED
@@ -0,0 +1,37 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
SimSun.ttf filter=lfs diff=lfs merge=lfs -text
assets/apple.jpeg filter=lfs diff=lfs merge=lfs -text
Qwen-VL-Chat/README.md
ADDED
@@ -0,0 +1,727 @@
---
language:
- zh
- en
tags:
- qwen
pipeline_tag: text-generation
inference: false
---

# Qwen-VL-Chat

<br>

<p align="center">
    <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_vl.jpg" width="400"/>
<p>
<br>

<p align="center">
        Qwen-VL
<a href="https://huggingface.co/Qwen/Qwen-VL">🤗</a>
<a href="https://modelscope.cn/models/qwen/Qwen-VL/summary">🤖</a>  |
        Qwen-VL-Chat
<a href="https://huggingface.co/Qwen/Qwen-VL-Chat">🤗</a>
<a href="https://modelscope.cn/models/qwen/Qwen-VL-Chat/summary">🤖</a> 
        (Int4:
<a href="https://huggingface.co/Qwen/Qwen-VL-Chat-Int4">🤗</a>
<a href="https://modelscope.cn/models/qwen/Qwen-VL-Chat-Int4/summary">🤖</a> ) |
        Qwen-VL-Plus
<a href="https://huggingface.co/spaces/Qwen/Qwen-VL-Plus">🤗</a>
<a href="https://modelscope.cn/studios/qwen/Qwen-VL-Chat-Demo/summary">🤖</a>  |
        Qwen-VL-Max
<a href="https://huggingface.co/spaces/Qwen/Qwen-VL-Max">🤗</a>
<a href="https://modelscope.cn/studios/qwen/Qwen-VL-Max/summary">🤖</a> 
<br>
<a href="https://tongyi.aliyun.com/qianwen">Web</a>   |   
<a href="https://help.aliyun.com/zh/dashscope/developer-reference/vl-plus-quick-start">API</a>   |   
<a href="assets/wechat.png">WeChat</a>   |   
<a href="https://discord.gg/z3GAxXZ9Ce">Discord</a>   |   
<a href="https://arxiv.org/abs/2308.12966">Paper</a>   |   
<a href="TUTORIAL.md">Tutorial</a>
</p>
<br>

**Qwen-VL** 是阿里云研发的大规模视觉语言模型(Large Vision Language Model, LVLM)。Qwen-VL 可以以图像、文本、检测框作为输入,并以文本和检测框作为输出。Qwen-VL 系列模型性能强大,具备多语言对话、多图交错对话等能力,并支持中文开放域定位和细粒度图像识别与理解。

**Qwen-VL** (Qwen Large Vision Language Model) is the visual multimodal version of the large model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-VL accepts images, text, and bounding boxes as inputs, and outputs text and bounding boxes. The features of Qwen-VL include:

目前,我们提供了Qwen-VL和Qwen-VL-Chat两个模型,分别为预训练模型和Chat模型。如果想了解更多关于模型的信息,请点击[链接](https://github.com/QwenLM/Qwen-VL/blob/master/visual_memo.md)查看我们的技术备忘录。本仓库为Qwen-VL-Chat仓库。

We release Qwen-VL and Qwen-VL-Chat, which are the pretrained model and the Chat model respectively. For more details about Qwen-VL, please refer to our [technical memo](https://github.com/QwenLM/Qwen-VL/blob/master/visual_memo.md). This repo is the one for Qwen-VL-Chat.
<br>

## 安装要求 (Requirements)

* python 3.8及以上版本
* pytorch 1.12及以上版本,推荐2.0及以上版本
* 建议使用CUDA 11.4及以上(GPU用户需考虑此选项)
* python 3.8 and above
* pytorch 1.12 and above, 2.0 and above are recommended
* CUDA 11.4 and above are recommended (this is for GPU users)
<br>
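
A quick way to confirm your environment matches the requirements above is to print the relevant versions. This is only an illustrative check, not part of the repository:

```python
# Illustrative environment check for the requirements listed above
# (not part of the official repository).
import sys
import torch

print("python:", sys.version.split()[0])      # needs 3.8+
print("torch :", torch.__version__)           # needs 1.12+, 2.0+ recommended
print("cuda  :", torch.version.cuda, "| available:", torch.cuda.is_available())  # 11.4+ recommended for GPU users
```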

## 快速开始 (Quickstart)

我们提供简单的示例来说明如何利用 🤗 Transformers 快速使用Qwen-VL-Chat。

在开始前,请确保你已经配置好环境并安装好相关的代码包。最重要的是,确保你满足上述要求,然后安装相关的依赖库。

Below, we provide simple examples to show how to use Qwen-VL-Chat with 🤗 Transformers.

Before running the code, make sure you have set up the environment and installed the required packages. Make sure you meet the above requirements, and then install the dependent libraries.

```bash
pip install -r requirements.txt
```

接下来你可以开始使用Transformers来使用我们的模型。关于视觉模块的更多用法,请参考[教程](TUTORIAL.md)。

Now you can start with Transformers. For more usage of the vision encoder, please refer to the [tutorial](TUTORIAL_zh.md).

#### 🤗 Transformers

To use Qwen-VL-Chat for inference, all you need to do is input a few lines of code as demonstrated below. However, **please make sure that you are using the latest code.**

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import torch
torch.manual_seed(1234)

# Note: The default behavior now has injection attack prevention off.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)

# use bf16
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
# use fp16
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL-Chat", device_map="auto", trust_remote_code=True, fp16=True).eval()
# use cpu only
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL-Chat", device_map="cpu", trust_remote_code=True).eval()
# use cuda device
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL-Chat", device_map="cuda", trust_remote_code=True).eval()

# Specify hyperparameters for generation (No need to do this if you are using transformers>=4.32.0)
# model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)

# 1st dialogue turn
query = tokenizer.from_list_format([
    {'image': 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'},
    {'text': '这是什么'},
])
response, history = model.chat(tokenizer, query=query, history=None)
print(response)
# 图中是一名年轻女子在沙滩上和她的狗玩耍,狗的品种可能是拉布拉多。她们坐在沙滩上,狗的前腿抬起来,似乎在和人类击掌。两人之间充满了信任和爱。

# 2nd dialogue turn
response, history = model.chat(tokenizer, '输出"击掌"的检测框', history=history)
print(response)
# <ref>击掌</ref><box>(517,508),(589,611)</box>
image = tokenizer.draw_bbox_on_latest_picture(response, history)
if image:
    image.save('1.jpg')
else:
    print("no box")
```
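
Qwen-VL-Chat also supports interleaving several images and text segments in one query (多图交错对话). A minimal sketch that reuses the `from_list_format` and `model.chat` interfaces shown above; the image URLs are placeholders to replace with your own:

```python
# Sketch: multi-image interleaved query using the same API as above.
# The image entries below are placeholders, not files shipped with this repo.
query = tokenizer.from_list_format([
    {'image': 'https://example.com/first.jpg'},   # placeholder image 1
    {'image': 'https://example.com/second.jpg'},  # placeholder image 2
    {'text': '这两张图片有什么不同?'},             # "What is different between these two images?"
])
response, history = model.chat(tokenizer, query=query, history=None)
print(response)
```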

<p align="center">
    <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo_highfive.jpg" width="500"/>
<p>
<br>

## 量化 (Quantization)

### 用法 (Usage)

当前我们提供了基于[AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)的量化方案,并提供了Qwen-VL-Chat的Int4量化版本Qwen-VL-Chat-Int4 [点击此处](https://huggingface.co/Qwen/Qwen-VL-Chat-Int4)。该模型在效果评测上几乎无损,并在显存占用和推理速度上具有明显优势。

下文说明如何使用该量化模型。开始之前,请确保你满足要求(如torch 2.0及以上、transformers 4.32.0及以上,等)并安装所需的代码库:

We provide a quantization solution based on [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) and release an Int4 quantized model for Qwen-VL-Chat, Qwen-VL-Chat-Int4 ([click here](https://huggingface.co/Qwen/Qwen-VL-Chat-Int4)), which achieves nearly lossless model quality with lower memory cost and faster inference speed.

Here we demonstrate how to use the provided quantized model for inference. Before you start, make sure you meet the requirements (e.g., torch 2.0 and above, transformers 4.32.0 and above, etc.) and install the required packages:

```bash
pip install optimum
git clone https://github.com/JustinLin610/AutoGPTQ.git && cd AutoGPTQ
pip install -v .
```

如遇到安装 `auto-gptq` 的问题,建议您前往官方[repo](https://github.com/PanQiWei/AutoGPTQ)寻找合适的wheel。

随后你便可以按照上述用法,轻松调用量化模型:

If you meet problems installing `auto-gptq`, we advise you to check out the official [repo](https://github.com/PanQiWei/AutoGPTQ) to find a suitable wheel.

Then you can load the quantized model easily and run inference the same as usual:

```python
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-VL-Chat-Int4",
    device_map="auto",
    trust_remote_code=True
).eval()
# Either a local path or a URL between <img></img> tags.
image_path = 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'
response, history = model.chat(tokenizer, query=f'<img>{image_path}</img>这是什么', history=None)
print(response)
```

### 效果评测 (Performance)

我们列出不同精度下模型在评测基准 **[TouchStone](https://github.com/OFA-Sys/TouchStone)** 上的表现,并发现量化模型并没有显著性能损失。结果如下所示:

We illustrate the model performance of both BF16 and Int4 models on the benchmark **[TouchStone](https://github.com/OFA-Sys/TouchStone)**, and we find that the quantized model does not suffer from significant performance degradation. Results are shown below:

| Quantization | ZH    | EN    |
| ------------ | :---: | :---: |
| BF16         | 401.2 | 645.2 |
| Int4         | 386.6 | 651.4 |

### 推理速度 (Inference Speed)

我们测算了在输入一张图片(即258个token)的条件下BF16和Int4的模型生成1792 (2048-258) 和 7934 (8192-258) 个token的平均速度。

We measured the average inference speed (tokens/s) of generating 1792 (2048-258) and 7934 (8192-258) tokens with the context of an image (which takes 258 tokens) under BF16 precision and Int4 quantization, respectively.

| Quantization | Speed (2048 tokens) | Speed (8192 tokens) |
| ------------ | :-----------------: | :-----------------: |
| BF16         | 28.87               | 24.32               |
| Int4         | 37.79               | 34.34               |

推理速度测算是在单卡 A100-SXM4-80G GPU上运行,使用PyTorch 2.0.1及CUDA 11.4。

The profiling runs on a single A100-SXM4-80G GPU with PyTorch 2.0.1 and CUDA 11.4.

### GPU显存占用 (GPU Memory Usage)

我们还测算了在一张图片输入的条件下BF16和Int4模型生成1792 (2048-258) 和 7934 (8192-258) 个token所需显存。结果如下所示:

We also profile the peak GPU memory usage for encoding 1792 (2048-258) tokens (including an image) as context (and generating a single token) and for generating 7934 (8192-258) tokens (with an image as context) under BF16 and Int4 quantization, respectively. The results are shown below.

| Quantization | Peak Usage for Encoding 2048 Tokens | Peak Usage for Generating 8192 Tokens |
| ------------ | :---------------------------------: | :-----------------------------------: |
| BF16         | 22.60GB                             | 28.01GB                               |
| Int4         | 11.82GB                             | 17.23GB                               |

上述速度和显存测算使用[此脚本](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile_mm.py)完成。

The above speed and memory profiling were conducted using [this script](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile_mm.py).
<br>
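
For a rough, back-of-the-envelope speed check without the linked script, the sketch below times a single `model.chat` call and divides by an approximate token count. It is only an illustration under the Quickstart setup above, not the methodology of the official `profile_mm.py` script:

```python
# Rough tokens/s estimate (illustrative only; not the official profile_mm.py).
# Assumes `model` and `tokenizer` are loaded as in the Quickstart section.
import time
import torch

query = tokenizer.from_list_format([
    {'image': 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'},
    {'text': '这是什么'},
])

torch.cuda.synchronize()
start = time.time()
response, _ = model.chat(tokenizer, query=query, history=None)
torch.cuda.synchronize()
elapsed = time.time() - start

# Approximate the generated-token count by re-tokenizing the response text.
num_tokens = len(tokenizer(response)["input_ids"])
print(f"~{num_tokens} tokens in {elapsed:.2f}s -> ~{num_tokens / elapsed:.1f} tokens/s")
```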
|
212 |
+
|
213 |
+
## 评测
|
214 |
+
|
215 |
+
我们从两个角度评测了两个模型的能力:
|
216 |
+
|
217 |
+
1. 在**英文标准 Benchmark** 上评测模型的基础任务能力。目前评测了四大类多模态任务:
|
218 |
+
|
219 |
+
- Zero-shot Caption: 评测模型在未见过数据集上的零样本图片描述能力;
|
220 |
+
- General VQA: 评测模型的通用问答能力,例如判断题、颜色、个数、类目等问答能力;
|
221 |
+
- Text-based VQA:评测模型对于图片中文字相关的识别/问答能力,例如文档问答、图表问答、文字问答等;
|
222 |
+
- Referring Expression Compression:评测模型给定物体描述画检测框的能力;
|
223 |
+
2. **试金石 (TouchStone)**:为了评测模型整体的图文对话能力和人类对齐水平。我们为此构建了一个基于 GPT4 打分来评测 LVLM 模型的 Benchmark:TouchStone。在 TouchStone-v0.1 中:
|
224 |
+
|
225 |
+
- 评测基准总计涵盖 300+张图片、800+道题目、27个类别。包括基础属性问答、人物地标问答、影视作品问答、视觉推理、反事实推理、诗歌创作、故事写作,商品比较、图片解题等**尽可能广泛的类别**。
|
226 |
+
- 为了弥补目前 GPT4 无法直接读取图片的缺陷,我们给所有的带评测图片提供了**人工标注的充分详细描述**,并且将图片的详细描述、问题和模型的输出结果一起交给 GPT4 打分。
|
227 |
+
- 评测同时包含英文版本和中文版本。
|
228 |
+
|
229 |
+
评测结果如下:
|
230 |
+
|
231 |
+
We evaluated the model's ability from two perspectives:
|
232 |
+
|
233 |
+
1. **Standard Benchmarks**: We evaluate the model's basic task capabilities on four major categories of multimodal tasks:
|
234 |
+
|
235 |
+
- Zero-shot Caption: Evaluate model's zero-shot image captioning ability on unseen datasets;
|
236 |
+
- General VQA: Evaluate the general question-answering ability of pictures, such as the judgment, color, number, category, etc;
|
237 |
+
- Text-based VQA: Evaluate the model's ability to recognize text in pictures, such as document QA, chart QA, etc;
|
238 |
+
- Referring Expression Comprehension: Evaluate the ability to localize a target object in an image described by a referring expression.
|
239 |
+
2. **TouchStone**: To evaluate the overall text-image dialogue capability and alignment level with humans, we have constructed a benchmark called TouchStone, which is based on scoring with GPT4 to evaluate the LVLM model.
|
240 |
+
|
241 |
+
- The TouchStone benchmark covers a total of 300+ images, 800+ questions, and 27 categories. Such as attribute-based Q&A, celebrity recognition, writing poetry, summarizing multiple images, product comparison, math problem solving, etc;
|
242 |
+
- In order to break the current limitation of GPT4 in terms of direct image input, TouchStone provides fine-grained image annotations by human labeling. These detailed annotations, along with the questions and the model's output, are then presented to GPT4 for scoring.
|
243 |
+
- The benchmark includes both English and Chinese versions.
|
244 |
+
|
245 |
+
The results of the evaluation are as follows:
|
246 |
+
|
247 |
+
Qwen-VL outperforms current SOTA generalist models on multiple VL tasks and has a more comprehensive coverage in terms of capability range.
|
248 |
+
|
249 |
+
<p align="center">
|
250 |
+
<img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/radar.png" width="600"/>
|
251 |
+
<p>
|
252 |
+
|
253 |
+
### 零样本图像描述 & 通用视觉问答 (Zero-shot Captioning & General VQA)
|
254 |
+
|
255 |
+
<table>
|
256 |
+
<thead>
|
257 |
+
<tr>
|
258 |
+
<th rowspan="2">Model type</th>
|
259 |
+
<th rowspan="2">Model</th>
|
260 |
+
<th colspan="2">Zero-shot Captioning</th>
|
261 |
+
<th colspan="5">General VQA</th>
|
262 |
+
</tr>
|
263 |
+
<tr>
|
264 |
+
<th>NoCaps</th>
|
265 |
+
<th>Flickr30K</th>
|
266 |
+
<th>VQAv2<sup>dev</sup></th>
|
267 |
+
<th>OK-VQA</th>
|
268 |
+
<th>GQA</th>
|
269 |
+
<th>SciQA-Img<br>(0-shot)</th>
|
270 |
+
<th>VizWiz<br>(0-shot)</th>
|
271 |
+
</tr>
|
272 |
+
</thead>
|
273 |
+
<tbody align="center">
|
274 |
+
<tr>
|
275 |
+
<td rowspan="10">Generalist<br>Models</td>
|
276 |
+
<td>Flamingo-9B</td>
|
277 |
+
<td>-</td>
|
278 |
+
<td>61.5</td>
|
279 |
+
<td>51.8</td>
|
280 |
+
<td>44.7</td>
|
281 |
+
<td>-</td>
|
282 |
+
<td>-</td>
|
283 |
+
<td>28.8</td>
|
284 |
+
</tr>
|
285 |
+
<tr>
|
286 |
+
<td>Flamingo-80B</td>
|
287 |
+
<td>-</td>
|
288 |
+
<td>67.2</td>
|
289 |
+
<td>56.3</td>
|
290 |
+
<td>50.6</td>
|
291 |
+
<td>-</td>
|
292 |
+
<td>-</td>
|
293 |
+
<td>31.6</td>
|
294 |
+
</tr>
|
295 |
+
<tr>
|
296 |
+
<td>Unified-IO-XL</td>
|
297 |
+
<td>100.0</td>
|
298 |
+
<td>-</td>
|
299 |
+
<td>77.9</td>
|
300 |
+
<td>54.0</td>
|
301 |
+
<td>-</td>
|
302 |
+
<td>-</td>
|
303 |
+
<td>-</td>
|
304 |
+
</tr>
|
305 |
+
<tr>
|
306 |
+
<td>Kosmos-1</td>
|
307 |
+
<td>-</td>
|
308 |
+
<td>67.1</td>
|
309 |
+
<td>51.0</td>
|
310 |
+
<td>-</td>
|
311 |
+
<td>-</td>
|
312 |
+
<td>-</td>
|
313 |
+
<td>29.2</td>
|
314 |
+
</tr>
|
315 |
+
<tr>
|
316 |
+
<td>Kosmos-2</td>
|
317 |
+
<td>-</td>
|
318 |
+
<td>66.7</td>
|
319 |
+
<td>45.6</td>
|
320 |
+
<td>-</td>
|
321 |
+
<td>-</td>
|
322 |
+
<td>-</td>
|
323 |
+
<td>-</td>
|
324 |
+
</tr>
|
325 |
+
<tr>
|
326 |
+
<td>BLIP-2 (Vicuna-13B)</td>
|
327 |
+
<td>103.9</td>
|
328 |
+
<td>71.6</td>
|
329 |
+
<td>65.0</td>
|
330 |
+
<td>45.9</td>
|
331 |
+
<td>32.3</td>
|
332 |
+
<td>61.0</td>
|
333 |
+
<td>19.6</td>
|
334 |
+
</tr>
|
335 |
+
<tr>
|
336 |
+
<td>InstructBLIP (Vicuna-13B)</td>
|
337 |
+
<td><strong>121.9</strong></td>
|
338 |
+
<td>82.8</td>
|
339 |
+
<td>-</td>
|
340 |
+
<td>-</td>
|
341 |
+
<td>49.5</td>
|
342 |
+
<td>63.1</td>
|
343 |
+
<td>33.4</td>
|
344 |
+
</tr>
|
345 |
+
<tr>
|
346 |
+
<td>Shikra (Vicuna-13B)</td>
|
347 |
+
<td>-</td>
|
348 |
+
<td>73.9</td>
|
349 |
+
<td>77.36</td>
|
350 |
+
<td>47.16</td>
|
351 |
+
<td>-</td>
|
352 |
+
<td>-</td>
|
353 |
+
<td>-</td>
|
354 |
+
</tr>
|
355 |
+
<tr>
|
356 |
+
<td><strong>Qwen-VL (Qwen-7B)</strong></td>
|
357 |
+
<td>121.4</td>
|
358 |
+
<td><b>85.8</b></td>
|
359 |
+
<td><b>78.8</b></td>
|
360 |
+
<td><b>58.6</b></td>
|
361 |
+
<td><b>59.3</b></td>
|
362 |
+
<td>67.1</td>
|
363 |
+
<td>35.2</td>
|
364 |
+
</tr>
|
365 |
+
<!-- <tr>
|
366 |
+
<td>Qwen-VL (4-shot)</td>
|
367 |
+
<td>-</td>
|
368 |
+
<td>-</td>
|
369 |
+
<td>-</td>
|
370 |
+
<td>63.6</td>
|
371 |
+
<td>-</td>
|
372 |
+
<td>-</td>
|
373 |
+
<td>39.1</td>
|
374 |
+
</tr> -->
|
375 |
+
<tr>
|
376 |
+
<td>Qwen-VL-Chat</td>
|
377 |
+
<td>120.2</td>
|
378 |
+
<td>81.0</td>
|
379 |
+
<td>78.2</td>
|
380 |
+
<td>56.6</td>
|
381 |
+
<td>57.5</td>
|
382 |
+
<td><b>68.2</b></td>
|
383 |
+
<td><b>38.9</b></td>
|
384 |
+
</tr>
|
385 |
+
<!-- <tr>
|
386 |
+
<td>Qwen-VL-Chat (4-shot)</td>
|
387 |
+
<td>-</td>
|
388 |
+
<td>-</td>
|
389 |
+
<td>-</td>
|
390 |
+
<td>60.6</td>
|
391 |
+
<td>-</td>
|
392 |
+
<td>-</td>
|
393 |
+
<td>44.45</td>
|
394 |
+
</tr> -->
|
395 |
+
<tr>
|
396 |
+
<td>Previous SOTA<br>(Per Task Fine-tuning)</td>
|
397 |
+
<td>-</td>
|
398 |
+
<td>127.0<br>(PALI-17B)</td>
|
399 |
+
<td>84.5<br>(InstructBLIP<br>-FlanT5-XL)</td>
|
400 |
+
<td>86.1<br>(PALI-X<br>-55B)</td>
|
401 |
+
<td>66.1<br>(PALI-X<br>-55B)</td>
|
402 |
+
<td>72.1<br>(CFR)</td>
|
403 |
+
<td>92.53<br>(LLaVa+<br>GPT-4)</td>
|
404 |
+
<td>70.9<br>(PALI-X<br>-55B)</td>
|
405 |
+
</tr>
|
406 |
+
</tbody>
|
407 |
+
</table>
|
408 |
+
|
409 |
+
- 在 Zero-shot Caption 中,Qwen-VL 在 Flickr30K 数据集上取得了 **SOTA** 的结果,并在 Nocaps 数据集上取得了和 InstructBlip 可竞争的结果。
|
410 |
+
- 在 General VQA 中,Qwen-VL 取得了 LVLM 模型同等量级和设定下 **SOTA** 的结果。
|
411 |
+
- For zero-shot image captioning, Qwen-VL achieves the **SOTA** on Flickr30K and results on NoCaps that are competitive with InstructBLIP.
|
412 |
+
- For general VQA, Qwen-VL achieves the **SOTA** among generalist LVLMs of the same scale and setting.
|
413 |
+
|
414 |
+
### 文本导向的视觉问答 (Text-oriented VQA)
|
415 |
+
|
416 |
+
<table>
|
417 |
+
<thead>
|
418 |
+
<tr>
|
419 |
+
<th>Model type</th>
|
420 |
+
<th>Model</th>
|
421 |
+
<th>TextVQA</th>
|
422 |
+
<th>DocVQA</th>
|
423 |
+
<th>ChartQA</th>
|
424 |
+
<th>AI2D</th>
|
425 |
+
<th>OCR-VQA</th>
|
426 |
+
</tr>
|
427 |
+
</thead>
|
428 |
+
<tbody align="center">
|
429 |
+
<tr>
|
430 |
+
<td rowspan="5">Generalist Models</td>
|
431 |
+
<td>BLIP-2 (Vicuna-13B)</td>
|
432 |
+
<td>42.4</td>
|
433 |
+
<td>-</td>
|
434 |
+
<td>-</td>
|
435 |
+
<td>-</td>
|
436 |
+
<td>-</td>
|
437 |
+
</tr>
|
438 |
+
<tr>
|
439 |
+
<td>InstructBLIP (Vicuna-13B)</td>
|
440 |
+
<td>50.7</td>
|
441 |
+
<td>-</td>
|
442 |
+
<td>-</td>
|
443 |
+
<td>-</td>
|
444 |
+
<td>-</td>
|
445 |
+
</tr>
|
446 |
+
<tr>
|
447 |
+
<td>mPLUG-DocOwl (LLaMA-7B)</td>
|
448 |
+
<td>52.6</td>
|
449 |
+
<td>62.2</td>
|
450 |
+
<td>57.4</td>
|
451 |
+
<td>-</td>
|
452 |
+
<td>-</td>
|
453 |
+
</tr>
|
454 |
+
<tr>
|
455 |
+
<td>Pix2Struct-Large (1.3B)</td>
|
456 |
+
<td>-</td>
|
457 |
+
<td><b>76.6</b></td>
|
458 |
+
<td>58.6</td>
|
459 |
+
<td>42.1</td>
|
460 |
+
<td>71.3</td>
|
461 |
+
</tr>
|
462 |
+
<tr>
|
463 |
+
<td>Qwen-VL (Qwen-7B)</td>
|
464 |
+
<td><b>63.8</b></td>
|
465 |
+
<td>65.1</td>
|
466 |
+
<td><b>65.7</b></td>
|
467 |
+
<td><b>62.3</b></td>
|
468 |
+
<td><b>75.7</b></td>
|
469 |
+
</tr>
|
470 |
+
<tr>
|
471 |
+
<td>Specialist SOTAs<br>(Specialist/Finetuned)</td>
|
472 |
+
<td>PALI-X-55B (Single-task FT)<br>(Without OCR Pipeline)</td>
|
473 |
+
<td>71.44</td>
|
474 |
+
<td>80.0</td>
|
475 |
+
<td>70.0</td>
|
476 |
+
<td>81.2</td>
|
477 |
+
<td>75.0</td>
|
478 |
+
</tr>
|
479 |
+
</tbody>
|
480 |
+
</table>
|
481 |
+
|
482 |
+
- 在文字相关的识别/问答评测上,取得了当前规模下通用 LVLM 达到的最好结果。
|
483 |
+
- 分辨率对上述某几个评测非常重要,大部分 224 分辨率的开源 LVLM 模型无法完成以上评测,或只能通过切图的方式解决。Qwen-VL 将分辨率提升到 448,可以直接以端到端的方式进行以上评测。Qwen-VL 在很多任务上甚至超过了 1024 分辨率的 Pix2Struct-Large 模型。
|
484 |
+
- On text-oriented recognition/QA benchmarks, Qwen-VL achieves the SOTA among generalist LVLMs of comparable scale.
|
485 |
+
- Input resolution matters for several of the above evaluations. Most open-source LVLMs operate at 224 resolution and either cannot handle these benchmarks at all or can only handle them by cutting the image into tiles, whereas Qwen-VL scales the input resolution to 448 and can be evaluated end-to-end. On some tasks, Qwen-VL even outperforms the 1024-resolution Pix2Struct-Large model (see the patch-count sketch after this list).
|
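As a rough illustration of why the higher input resolution helps, the number of ViT patches can be computed from the `visual` settings in `Qwen-VL-Chat/config.json` further below (`image_size: 448`, `patch_size: 14`). This is only a sketch of the raw patch count, not of the full visual adapter.

```python
# Sketch: ViT patch count implied by the visual settings in Qwen-VL-Chat/config.json
# (image_size=448, patch_size=14). Moving from 224 to 448 resolution quadruples the
# number of patches the vision encoder sees, which is what makes end-to-end
# evaluation on text-heavy images feasible.
def patch_count(image_size: int, patch_size: int) -> int:
    side = image_size // patch_size
    return side * side

print(patch_count(224, 14))  # 256 patches at the common 224 resolution
print(patch_count(448, 14))  # 1024 patches at Qwen-VL's 448 resolution
```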
486 |
+
|
487 |
+
### 细粒度视觉定位 (Referring Expression Comprehension)
|
488 |
+
|
489 |
+
<table>
|
490 |
+
<thead>
|
491 |
+
<tr>
|
492 |
+
<th rowspan="2">Model type</th>
|
493 |
+
<th rowspan="2">Model</th>
|
494 |
+
<th colspan="3">RefCOCO</th>
|
495 |
+
<th colspan="3">RefCOCO+</th>
|
496 |
+
<th colspan="2">RefCOCOg</th>
|
497 |
+
<th>GRIT</th>
|
498 |
+
</tr>
|
499 |
+
<tr>
|
500 |
+
<th>val</th>
|
501 |
+
<th>test-A</th>
|
502 |
+
<th>test-B</th>
|
503 |
+
<th>val</th>
|
504 |
+
<th>test-A</th>
|
505 |
+
<th>test-B</th>
|
506 |
+
<th>val-u</th>
|
507 |
+
<th>test-u</th>
|
508 |
+
<th>refexp</th>
|
509 |
+
</tr>
|
510 |
+
</thead>
|
511 |
+
<tbody align="center">
|
512 |
+
<tr>
|
513 |
+
<td rowspan="8">Generalist Models</td>
|
514 |
+
<td>GPV-2</td>
|
515 |
+
<td>-</td>
|
516 |
+
<td>-</td>
|
517 |
+
<td>-</td>
|
518 |
+
<td>-</td>
|
519 |
+
<td>-</td>
|
520 |
+
<td>-</td>
|
521 |
+
<td>-</td>
|
522 |
+
<td>-</td>
|
523 |
+
<td>51.50</td>
|
524 |
+
</tr>
|
525 |
+
<tr>
|
526 |
+
<td>OFA-L*</td>
|
527 |
+
<td>79.96</td>
|
528 |
+
<td>83.67</td>
|
529 |
+
<td>76.39</td>
|
530 |
+
<td>68.29</td>
|
531 |
+
<td>76.00</td>
|
532 |
+
<td>61.75</td>
|
533 |
+
<td>67.57</td>
|
534 |
+
<td>67.58</td>
|
535 |
+
<td>61.70</td>
|
536 |
+
</tr>
|
537 |
+
<tr>
|
538 |
+
<td>Unified-IO</td>
|
539 |
+
<td>-</td>
|
540 |
+
<td>-</td>
|
541 |
+
<td>-</td>
|
542 |
+
<td>-</td>
|
543 |
+
<td>-</td>
|
544 |
+
<td>-</td>
|
545 |
+
<td>-</td>
|
546 |
+
<td>-</td>
|
547 |
+
<td><b>78.61</b></td>
|
548 |
+
</tr>
|
549 |
+
<tr>
|
550 |
+
<td>VisionLLM-H</td>
|
551 |
+
<td>-</td>
|
552 |
+
<td>86.70</td>
|
553 |
+
<td>-</td>
|
554 |
+
<td>-</td>
|
555 |
+
<td>-</td>
|
556 |
+
<td>-</td>
|
557 |
+
<td>-</td>
|
558 |
+
<td>-</td>
|
559 |
+
<td>-</td>
|
560 |
+
</tr>
|
561 |
+
<tr>
|
562 |
+
<td>Shikra-7B</td>
|
563 |
+
<td>87.01</td>
|
564 |
+
<td>90.61</td>
|
565 |
+
<td>80.24 </td>
|
566 |
+
<td>81.60</td>
|
567 |
+
<td>87.36</td>
|
568 |
+
<td>72.12</td>
|
569 |
+
<td>82.27</td>
|
570 |
+
<td>82.19</td>
|
571 |
+
<td>69.34</td>
|
572 |
+
</tr>
|
573 |
+
<tr>
|
574 |
+
<td>Shikra-13B</td>
|
575 |
+
<td>87.83 </td>
|
576 |
+
<td>91.11</td>
|
577 |
+
<td>81.81</td>
|
578 |
+
<td>82.89</td>
|
579 |
+
<td>87.79</td>
|
580 |
+
<td>74.41</td>
|
581 |
+
<td>82.64</td>
|
582 |
+
<td>83.16</td>
|
583 |
+
<td>69.03</td>
|
584 |
+
</tr>
|
585 |
+
<tr>
|
586 |
+
<td>Qwen-VL-7B</td>
|
587 |
+
<td><b>89.36</b></td>
|
588 |
+
<td>92.26</td>
|
589 |
+
<td><b>85.34</b></td>
|
590 |
+
<td><b>83.12</b></td>
|
591 |
+
<td>88.25</td>
|
592 |
+
<td><b>77.21</b></td>
|
593 |
+
<td>85.58</td>
|
594 |
+
<td>85.48</td>
|
595 |
+
<td>78.22</td>
|
596 |
+
</tr>
|
597 |
+
<tr>
|
598 |
+
<td>Qwen-VL-7B-Chat</td>
|
599 |
+
<td>88.55</td>
|
600 |
+
<td><b>92.27</b></td>
|
601 |
+
<td>84.51</td>
|
602 |
+
<td>82.82</td>
|
603 |
+
<td><b>88.59</b></td>
|
604 |
+
<td>76.79</td>
|
605 |
+
<td><b>85.96</b></td>
|
606 |
+
<td><b>86.32</b></td>
|
607 |
+
<td>-</td>
|
608 |
+
</tr>
<tr>
|
609 |
+
<td rowspan="3">Specialist SOTAs<br>(Specialist/Finetuned)</td>
|
610 |
+
<td>G-DINO-L</td>
|
611 |
+
<td>90.56 </td>
|
612 |
+
<td>93.19</td>
|
613 |
+
<td>88.24</td>
|
614 |
+
<td>82.75</td>
|
615 |
+
<td>88.95</td>
|
616 |
+
<td>75.92</td>
|
617 |
+
<td>86.13</td>
|
618 |
+
<td>87.02</td>
|
619 |
+
<td>-</td>
|
620 |
+
</tr>
|
621 |
+
<tr>
|
622 |
+
<td>UNINEXT-H</td>
|
623 |
+
<td>92.64 </td>
|
624 |
+
<td>94.33</td>
|
625 |
+
<td>91.46</td>
|
626 |
+
<td>85.24</td>
|
627 |
+
<td>89.63</td>
|
628 |
+
<td>79.79</td>
|
629 |
+
<td>88.73</td>
|
630 |
+
<td>89.37</td>
|
631 |
+
<td>-</td>
|
632 |
+
</tr>
|
633 |
+
<tr>
|
634 |
+
<td>ONE-PEACE</td>
|
635 |
+
<td>92.58 </td>
|
636 |
+
<td>94.18</td>
|
637 |
+
<td>89.26</td>
|
638 |
+
<td>88.77</td>
|
639 |
+
<td>92.21</td>
|
640 |
+
<td>83.23</td>
|
641 |
+
<td>89.22</td>
|
642 |
+
<td>89.27</td>
|
643 |
+
<td>-</td>
|
644 |
+
</tr>
|
645 |
+
</tbody>
|
646 |
+
</table>
|
647 |
+
|
648 |
+
- 在定位任务上,Qwen-VL 全面超过 Shikra-13B,取得了目前 Generalist LVLM 模型上在 Refcoco 上的 **SOTA**。
|
649 |
+
- Qwen-VL 并没有在任何中文定位数据上训练过,但通过中文 Caption 数据和 英文 Grounding 数据的训练,可以 Zero-shot 泛化出中文 Grounding 能力。
|
650 |
+
|
651 |
+
我们提供了以上**所有**评测脚本以供复现我们的实验结果。请阅读 [eval/EVALUATION.md](eval/EVALUATION.md) 了解更多信息。
|
652 |
+
|
653 |
+
- On the grounding benchmarks above, Qwen-VL surpasses Shikra-13B across the board and achieves the **SOTA** on the RefCOCO series among generalist LVLMs.
|
654 |
+
- Qwen-VL has not been trained on any Chinese grounding data, yet it generalizes to Chinese grounding tasks in a zero-shot manner through training on Chinese caption data and English grounding data.
|
655 |
+
|
656 |
+
We provide all of the above evaluation scripts for reproducing our experimental results. Please read [eval/EVALUATION.md](eval/EVALUATION.md) for more information.
|
657 |
+
|
658 |
+
### 闲聊能力测评 (Chat Evaluation)
|
659 |
+
|
660 |
+
TouchStone 是一个基于 GPT4 打分来评测 LVLM 模型的图文对话能力和人类对齐水平的基准。它涵盖了 300+张图片、800+道题目、27个类别,包括基础属性、人物地标、视觉推理、诗歌创作、故事写作、商品比较、图片解题等**尽可能广泛的类别**。关于 TouchStone 的详细介绍,请参考[touchstone/README_CN.md](touchstone/README_CN.md)了解更多信息。
|
661 |
+
|
662 |
+
TouchStone is a benchmark that uses GPT4-based scoring to evaluate the text-image dialogue ability of LVLMs and their alignment with humans. It covers a total of 300+ images, 800+ questions, and 27 categories, such as attribute-based Q&A, celebrity recognition, poetry writing, summarizing multiple images, product comparison, and math problem solving. Please read [touchstone/README.md](touchstone/README.md) for more information.
|
663 |
+
|
664 |
+
#### 英语 (English)
|
665 |
+
|
666 |
+
| Model | Score |
|
667 |
+
|---------------|-------|
|
668 |
+
| PandaGPT | 488.5 |
|
669 |
+
| MiniGPT4 | 531.7 |
|
670 |
+
| InstructBLIP | 552.4 |
|
671 |
+
| LLaMA-AdapterV2 | 590.1 |
|
672 |
+
| mPLUG-Owl | 605.4 |
|
673 |
+
| LLaVA | 602.7 |
|
674 |
+
| Qwen-VL-Chat | 645.2 |
|
675 |
+
|
676 |
+
#### 中文 (Chinese)
|
677 |
+
|
678 |
+
| Model | Score |
|
679 |
+
|---------------|-------|
|
680 |
+
| VisualGLM | 247.1 |
|
681 |
+
| Qwen-VL-Chat | 401.2 |
|
682 |
+
|
683 |
+
Qwen-VL-Chat 模型在中英文的对齐评测中均取得当前 LVLM 模型下的最好结果。
|
684 |
+
|
685 |
+
Qwen-VL-Chat achieves the best results among current LVLMs in both the Chinese and English alignment evaluations.
|
686 |
+
<br>
|
687 |
+
|
688 |
+
## 常见问题 (FAQ)
|
689 |
+
|
690 |
+
如遇到问题,敬请查阅 [FAQ](https://github.com/QwenLM/Qwen-VL/blob/master/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。
|
691 |
+
|
692 |
+
If you run into problems, please consult the [FAQ](https://github.com/QwenLM/Qwen-VL/blob/master/FAQ.md) and the existing issues to search for a solution before opening a new issue.
|
693 |
+
<br>
|
694 |
+
|
695 |
+
## 使用协议 (License Agreement)
|
696 |
+
|
697 |
+
研究人员与开发者可使用Qwen-VL和Qwen-VL-Chat或进行二次开发。我们同样允许商业使用,具体细节请查看[LICENSE](https://github.com/QwenLM/Qwen-VL/blob/master/LICENSE)。如需商用,请填写[问卷](https://dashscope.console.aliyun.com/openModelApply/qianwen)申请。
|
698 |
+
|
699 |
+
Researchers and developers are free to use the code and model weights of both Qwen-VL and Qwen-VL-Chat. We also allow commercial use; check our license at [LICENSE](LICENSE) for more details. For commercial use, please fill out the [questionnaire](https://dashscope.console.aliyun.com/openModelApply/qianwen) to apply.
|
700 |
+
<br>
|
701 |
+
|
702 |
+
## 引用 (Citation)
|
703 |
+
|
704 |
+
如果你觉得我们的论文和代码对你的研究有帮助,请考虑:star: 和引用 :pencil: :)
|
705 |
+
|
706 |
+
If you find our paper and code useful in your research, please consider giving a star :star: and citation :pencil: :)
|
707 |
+
|
708 |
+
```BibTeX
|
709 |
+
@article{Qwen-VL,
|
710 |
+
title={Qwen-VL: A Frontier Large Vision-Language Model with Versatile Abilities},
|
711 |
+
author={Bai, Jinze and Bai, Shuai and Yang, Shusheng and Wang, Shijie and Tan, Sinan and Wang, Peng and Lin, Junyang and Zhou, Chang and Zhou, Jingren},
|
712 |
+
journal={arXiv preprint arXiv:2308.12966},
|
713 |
+
year={2023}
|
714 |
+
}
|
715 |
+
```
|
716 |
+
<br>
|
717 |
+
|
718 |
+
## 联系我们 (Contact Us)
|
719 |
+
|
720 |
+
如果你想给我们的研发团队和产品团队留言,请通过邮件([email protected])联系我们。
|
721 |
+
|
722 |
+
If you would like to leave a message for either our research team or our product team, feel free to send an email to [email protected].
|
723 |
+
|
724 |
+
|
725 |
+
|
726 |
+
|
727 |
+
|
Qwen-VL-Chat/SimSun.ttf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ca4da082cd970f0c8abaa79f213ddcbc475f7b5afabcb81b385998f9ebfbb53f
|
3 |
+
size 10499104
|
Qwen-VL-Chat/config.json
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "./",
|
3 |
+
"architectures": [
|
4 |
+
"QWenLMHeadModel"
|
5 |
+
],
|
6 |
+
"attn_dropout_prob": 0.0,
|
7 |
+
"auto_map": {
|
8 |
+
"AutoConfig": "configuration_qwen.QWenConfig",
|
9 |
+
"AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
|
10 |
+
},
|
11 |
+
"bf16": false,
|
12 |
+
"emb_dropout_prob": 0.0,
|
13 |
+
"fp16": false,
|
14 |
+
"fp32": false,
|
15 |
+
"hidden_size": 4096,
|
16 |
+
"initializer_range": 0.02,
|
17 |
+
"intermediate_size": 22016,
|
18 |
+
"kv_channels": 128,
|
19 |
+
"layer_norm_epsilon": 1e-06,
|
20 |
+
"max_position_embeddings": 8192,
|
21 |
+
"model_type": "qwen",
|
22 |
+
"no_bias": true,
|
23 |
+
"num_attention_heads": 32,
|
24 |
+
"num_hidden_layers": 32,
|
25 |
+
"onnx_safe": null,
|
26 |
+
"rotary_emb_base": 10000,
|
27 |
+
"rotary_pct": 1.0,
|
28 |
+
"scale_attn_weights": true,
|
29 |
+
"seq_length": 2048,
|
30 |
+
"tie_word_embeddings": false,
|
31 |
+
"tokenizer_type": "QWenTokenizer",
|
32 |
+
"torch_dtype": "bfloat16",
|
33 |
+
"transformers_version": "4.31.0",
|
34 |
+
"use_cache": true,
|
35 |
+
"use_dynamic_ntk": true,
|
36 |
+
"use_flash_attn": false,
|
37 |
+
"use_logn_attn": true,
|
38 |
+
"visual": {
|
39 |
+
"heads": 16,
|
40 |
+
"image_size": 448,
|
41 |
+
"image_start_id": 151857,
|
42 |
+
"layers": 48,
|
43 |
+
"mlp_ratio": 4.9231,
|
44 |
+
"output_dim": 4096,
|
45 |
+
"patch_size": 14,
|
46 |
+
"width": 1664
|
47 |
+
},
|
48 |
+
"vocab_size": 151936
|
49 |
+
}
|
Qwen-VL-Chat/configuration_qwen.py
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Alibaba Cloud.
|
2 |
+
#
|
3 |
+
# This source code is licensed under the license found in the
|
4 |
+
# LICENSE file in the root directory of this source tree.
|
5 |
+
|
6 |
+
from transformers import PretrainedConfig
|
7 |
+
|
8 |
+
|
9 |
+
class QWenConfig(PretrainedConfig):
|
10 |
+
model_type = "qwen"
|
11 |
+
keys_to_ignore_at_inference = ["past_key_values"]
|
12 |
+
|
13 |
+
def __init__(
|
14 |
+
self,
|
15 |
+
vocab_size=151936,
|
16 |
+
hidden_size=4096,
|
17 |
+
num_hidden_layers=32,
|
18 |
+
num_attention_heads=32,
|
19 |
+
emb_dropout_prob=0.0,
|
20 |
+
attn_dropout_prob=0.0,
|
21 |
+
layer_norm_epsilon=1e-6,
|
22 |
+
initializer_range=0.02,
|
23 |
+
max_position_embeddings=8192,
|
24 |
+
scale_attn_weights=True,
|
25 |
+
use_cache=True,
|
26 |
+
bf16=False,
|
27 |
+
fp16=False,
|
28 |
+
fp32=False,
|
29 |
+
kv_channels=128,
|
30 |
+
rotary_pct=1.0,
|
31 |
+
rotary_emb_base=10000,
|
32 |
+
use_dynamic_ntk=True,
|
33 |
+
use_logn_attn=True,
|
34 |
+
use_flash_attn="auto",
|
35 |
+
intermediate_size=22016,
|
36 |
+
no_bias=True,
|
37 |
+
tie_word_embeddings=False,
|
38 |
+
**kwargs,
|
39 |
+
):
|
40 |
+
self.vocab_size = vocab_size
|
41 |
+
self.hidden_size = hidden_size
|
42 |
+
self.intermediate_size = intermediate_size
|
43 |
+
self.num_hidden_layers = num_hidden_layers
|
44 |
+
self.num_attention_heads = num_attention_heads
|
45 |
+
self.emb_dropout_prob = emb_dropout_prob
|
46 |
+
self.attn_dropout_prob = attn_dropout_prob
|
47 |
+
self.layer_norm_epsilon = layer_norm_epsilon
|
48 |
+
self.initializer_range = initializer_range
|
49 |
+
self.scale_attn_weights = scale_attn_weights
|
50 |
+
self.use_cache = use_cache
|
51 |
+
self.max_position_embeddings = max_position_embeddings
|
52 |
+
self.bf16 = bf16
|
53 |
+
self.fp16 = fp16
|
54 |
+
self.fp32 = fp32
|
55 |
+
self.kv_channels = kv_channels
|
56 |
+
self.rotary_pct = rotary_pct
|
57 |
+
self.rotary_emb_base = rotary_emb_base
|
58 |
+
self.use_dynamic_ntk = use_dynamic_ntk
|
59 |
+
self.use_logn_attn = use_logn_attn
|
60 |
+
self.use_flash_attn = use_flash_attn
|
61 |
+
self.no_bias = no_bias
|
62 |
+
super().__init__(
|
63 |
+
tie_word_embeddings=tie_word_embeddings,
|
64 |
+
**kwargs
|
65 |
+
)
|
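A usage note on the configuration class above: it is normally not imported directly; the `auto_map` entry in `config.json` lets `transformers` load it through `AutoConfig` with `trust_remote_code=True`. Below is a minimal sketch, assuming the published model id `Qwen/Qwen-VL-Chat` (a local path to this folder works the same way).

```python
# Minimal sketch: loading the custom QWenConfig defined above via transformers'
# AutoConfig, relying on the "auto_map" entry in config.json. trust_remote_code=True
# is required because the config class ships with the model repository.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)
print(config.hidden_size, config.num_hidden_layers)  # 4096 32 for this checkpoint
```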
Qwen-VL-Chat/generation_config.json
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"chat_format": "chatml",
|
3 |
+
"do_sample": true,
|
4 |
+
"eos_token_id": 151643,
|
5 |
+
"max_new_tokens": 512,
|
6 |
+
"max_window_size": 6144,
|
7 |
+
"pad_token_id": 151643,
|
8 |
+
"top_k": 0,
|
9 |
+
"top_p": 0.3,
|
10 |
+
"transformers_version": "4.31.0"
|
11 |
+
}
|
Qwen-VL-Chat/modeling_qwen.py
ADDED
@@ -0,0 +1,1162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Alibaba Cloud.
|
2 |
+
#
|
3 |
+
# This source code is licensed under the license found in the
|
4 |
+
# LICENSE file in the root directory of this source tree.
|
5 |
+
|
6 |
+
import importlib
|
7 |
+
import math
|
8 |
+
from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator
|
9 |
+
|
10 |
+
import torch
|
11 |
+
import torch.nn.functional as F
|
12 |
+
import torch.utils.checkpoint
|
13 |
+
from torch.cuda.amp import autocast
|
14 |
+
|
15 |
+
from torch.nn import CrossEntropyLoss
|
16 |
+
from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
|
17 |
+
from transformers.generation.logits_process import LogitsProcessorList
|
18 |
+
|
19 |
+
if TYPE_CHECKING:
|
20 |
+
from transformers.generation.streamers import BaseStreamer
|
21 |
+
from transformers.generation.utils import GenerateOutput
|
22 |
+
from transformers.modeling_outputs import (
|
23 |
+
BaseModelOutputWithPast,
|
24 |
+
CausalLMOutputWithPast,
|
25 |
+
)
|
26 |
+
from transformers.modeling_utils import PreTrainedModel
|
27 |
+
from transformers.utils import logging
|
28 |
+
|
29 |
+
try:
|
30 |
+
from einops import rearrange
|
31 |
+
except ImportError:
|
32 |
+
rearrange = None
|
33 |
+
from torch import nn
|
34 |
+
|
35 |
+
SUPPORT_CUDA = torch.cuda.is_available()
|
36 |
+
SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
|
37 |
+
SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
|
38 |
+
|
39 |
+
from .configuration_qwen import QWenConfig
|
40 |
+
from .qwen_generation_utils import (
|
41 |
+
HistoryType,
|
42 |
+
make_context,
|
43 |
+
decode_tokens,
|
44 |
+
get_stop_words_ids,
|
45 |
+
StopWordsLogitsProcessor,
|
46 |
+
)
|
47 |
+
from .visual import VisionTransformer
|
48 |
+
|
49 |
+
|
50 |
+
logger = logging.get_logger(__name__)
|
51 |
+
|
52 |
+
_CHECKPOINT_FOR_DOC = "qwen"
|
53 |
+
_CONFIG_FOR_DOC = "QWenConfig"
|
54 |
+
|
55 |
+
QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]
|
56 |
+
|
57 |
+
_ERROR_BAD_CHAT_FORMAT = """\
|
58 |
+
We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml".
|
59 |
+
If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
|
60 |
+
我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
|
61 |
+
如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
|
62 |
+
"""
|
63 |
+
|
64 |
+
_SENTINEL = object()
|
65 |
+
_ERROR_STREAM_IN_CHAT = """\
|
66 |
+
Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
|
67 |
+
向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
|
68 |
+
"""
|
69 |
+
|
70 |
+
apply_rotary_emb_func = None
|
71 |
+
rms_norm = None
|
72 |
+
|
73 |
+
|
74 |
+
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
|
75 |
+
def _make_causal_mask(
|
76 |
+
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
|
77 |
+
):
|
78 |
+
"""
|
79 |
+
Make causal mask used for bi-directional self-attention.
|
80 |
+
"""
|
81 |
+
bsz, tgt_len = input_ids_shape
|
82 |
+
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
|
83 |
+
mask_cond = torch.arange(mask.size(-1), device=device)
|
84 |
+
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
|
85 |
+
mask = mask.to(dtype)
|
86 |
+
|
87 |
+
if past_key_values_length > 0:
|
88 |
+
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
|
89 |
+
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
|
90 |
+
|
91 |
+
|
92 |
+
# Copied from transformers.models.bart.modeling_bart._expand_mask
|
93 |
+
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
|
94 |
+
"""
|
95 |
+
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
96 |
+
"""
|
97 |
+
bsz, src_len = mask.size()
|
98 |
+
tgt_len = tgt_len if tgt_len is not None else src_len
|
99 |
+
|
100 |
+
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
|
101 |
+
|
102 |
+
inverted_mask = 1.0 - expanded_mask
|
103 |
+
|
104 |
+
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
|
105 |
+
|
106 |
+
|
107 |
+
class QWenAttention(nn.Module):
|
108 |
+
def __init__(self, config):
|
109 |
+
super().__init__()
|
110 |
+
|
111 |
+
self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
|
112 |
+
self.seq_length = config.seq_length
|
113 |
+
|
114 |
+
self.hidden_size = config.hidden_size
|
115 |
+
self.split_size = config.hidden_size
|
116 |
+
self.num_heads = config.num_attention_heads
|
117 |
+
self.head_dim = self.hidden_size // self.num_heads
|
118 |
+
|
119 |
+
self.scale_attn_weights = True
|
120 |
+
|
121 |
+
self.projection_size = config.kv_channels * config.num_attention_heads
|
122 |
+
|
123 |
+
assert self.projection_size % config.num_attention_heads == 0
|
124 |
+
self.hidden_size_per_attention_head = (
|
125 |
+
self.projection_size // config.num_attention_heads
|
126 |
+
)
|
127 |
+
|
128 |
+
self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)
|
129 |
+
|
130 |
+
self.c_proj = nn.Linear(
|
131 |
+
config.hidden_size, self.projection_size, bias=not config.no_bias
|
132 |
+
)
|
133 |
+
|
134 |
+
self.is_fp32 = not (config.bf16 or config.fp16)
|
135 |
+
self.bf16 = config.bf16
|
136 |
+
|
137 |
+
self.use_dynamic_ntk = config.use_dynamic_ntk
|
138 |
+
self.use_logn_attn = config.use_logn_attn
|
139 |
+
|
140 |
+
logn_list = [
|
141 |
+
math.log(i, self.seq_length) if i > self.seq_length else 1
|
142 |
+
for i in range(1, 32768)
|
143 |
+
]
|
144 |
+
self.logn_tensor = torch.tensor(logn_list)[None, :, None, None]
|
145 |
+
|
146 |
+
self.attn_dropout = nn.Dropout(config.attn_dropout_prob)
|
147 |
+
|
148 |
+
def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None):
|
149 |
+
attn_weights = torch.matmul(query, key.transpose(-1, -2))
|
150 |
+
|
151 |
+
if self.scale_attn_weights:
|
152 |
+
attn_weights = attn_weights / torch.full(
|
153 |
+
[],
|
154 |
+
value.size(-1) ** 0.5,
|
155 |
+
dtype=attn_weights.dtype,
|
156 |
+
device=attn_weights.device,
|
157 |
+
)
|
158 |
+
|
159 |
+
query_length, key_length = query.size(-2), key.size(-2)
|
160 |
+
# causal_mask = self.bias[
|
161 |
+
# :, :, key_length - query_length : key_length, :key_length
|
162 |
+
# ]
|
163 |
+
# mask_value = torch.finfo(attn_weights.dtype).min
|
164 |
+
# mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(
|
165 |
+
# attn_weights.device
|
166 |
+
# )
|
167 |
+
# attn_weights = torch.where(
|
168 |
+
# causal_mask, attn_weights.to(attn_weights.dtype), mask_value
|
169 |
+
# )
|
170 |
+
attn_weights = attn_weights + attention_mask
|
171 |
+
|
172 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
|
173 |
+
|
174 |
+
attn_weights = attn_weights.type(value.dtype)
|
175 |
+
attn_weights = self.attn_dropout(attn_weights)
|
176 |
+
|
177 |
+
if head_mask is not None:
|
178 |
+
attn_weights = attn_weights * head_mask
|
179 |
+
|
180 |
+
attn_output = torch.matmul(attn_weights, value)
|
181 |
+
attn_output = attn_output.transpose(1, 2)
|
182 |
+
|
183 |
+
return attn_output, attn_weights
|
184 |
+
|
185 |
+
def _upcast_and_reordered_attn(
|
186 |
+
self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None
|
187 |
+
):
|
188 |
+
bsz, num_heads, q_seq_len, dk = query.size()
|
189 |
+
_, _, k_seq_len, _ = key.size()
|
190 |
+
|
191 |
+
attn_weights = torch.empty(
|
192 |
+
bsz * num_heads,
|
193 |
+
q_seq_len,
|
194 |
+
k_seq_len,
|
195 |
+
dtype=torch.float32,
|
196 |
+
device=query.device,
|
197 |
+
)
|
198 |
+
|
199 |
+
scale_factor = 1.0
|
200 |
+
if self.scale_attn_weights:
|
201 |
+
scale_factor /= float(value.size(-1)) ** 0.5
|
202 |
+
|
203 |
+
with autocast(enabled=False):
|
204 |
+
q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(
|
205 |
+
-1, dk, k_seq_len
|
206 |
+
)
|
207 |
+
attn_weights = torch.baddbmm(
|
208 |
+
attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor
|
209 |
+
)
|
210 |
+
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
|
211 |
+
|
212 |
+
query_length, key_length = query.size(-2), key.size(-2)
|
213 |
+
causal_mask = registered_causal_mask[
|
214 |
+
:, :, key_length - query_length : key_length, :key_length
|
215 |
+
]
|
216 |
+
mask_value = torch.finfo(attn_weights.dtype).min
|
217 |
+
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(
|
218 |
+
attn_weights.device
|
219 |
+
)
|
220 |
+
attn_weights = torch.where(causal_mask, attn_weights, mask_value)
|
221 |
+
|
222 |
+
if attention_mask is not None:
|
223 |
+
attn_weights = attn_weights + attention_mask
|
224 |
+
|
225 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
|
226 |
+
|
227 |
+
if attn_weights.dtype != torch.float32:
|
228 |
+
raise RuntimeError(
|
229 |
+
"Error with upcasting, attn_weights does not have dtype torch.float32"
|
230 |
+
)
|
231 |
+
attn_weights = attn_weights.type(value.dtype)
|
232 |
+
attn_weights = self.attn_dropout(attn_weights)
|
233 |
+
|
234 |
+
if head_mask is not None:
|
235 |
+
attn_weights = attn_weights * head_mask
|
236 |
+
|
237 |
+
attn_output = torch.matmul(attn_weights, value)
|
238 |
+
|
239 |
+
return attn_output, attn_weights
|
240 |
+
|
241 |
+
def _split_heads(self, tensor, num_heads, attn_head_size):
|
242 |
+
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
|
243 |
+
tensor = tensor.view(new_shape)
|
244 |
+
return tensor
|
245 |
+
|
246 |
+
def _merge_heads(self, tensor, num_heads, attn_head_size):
|
247 |
+
tensor = tensor.contiguous()
|
248 |
+
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
|
249 |
+
return tensor.view(new_shape)
|
250 |
+
|
251 |
+
def forward(
|
252 |
+
self,
|
253 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]],
|
254 |
+
rotary_pos_emb: Optional[List[torch.Tensor]] = None,
|
255 |
+
registered_causal_mask: Optional[torch.Tensor] = None,
|
256 |
+
layer_past: Optional[Tuple[torch.Tensor]] = None,
|
257 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
258 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
259 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
260 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
261 |
+
output_attentions: Optional[bool] = False,
|
262 |
+
use_cache: Optional[bool] = False,
|
263 |
+
):
|
264 |
+
|
265 |
+
mixed_x_layer = self.c_attn(hidden_states)
|
266 |
+
|
267 |
+
query, key, value = mixed_x_layer.split(self.split_size, dim=2)
|
268 |
+
|
269 |
+
query = self._split_heads(query, self.num_heads, self.head_dim)
|
270 |
+
key = self._split_heads(key, self.num_heads, self.head_dim)
|
271 |
+
value = self._split_heads(value, self.num_heads, self.head_dim)
|
272 |
+
|
273 |
+
if rotary_pos_emb is not None:
|
274 |
+
cur_len = query.shape[1]
|
275 |
+
rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
|
276 |
+
rotary_pos_emb = (rotary_pos_emb,) * 2
|
277 |
+
q_pos_emb, k_pos_emb = rotary_pos_emb
|
278 |
+
# Slice the pos emb for current inference
|
279 |
+
query = apply_rotary_pos_emb(query, q_pos_emb)
|
280 |
+
key = apply_rotary_pos_emb(key, k_pos_emb)
|
281 |
+
|
282 |
+
if layer_past is not None:
|
283 |
+
past_key, past_value = layer_past[0], layer_past[1]
|
284 |
+
key = torch.cat((past_key, key), dim=1)
|
285 |
+
value = torch.cat((past_value, value), dim=1)
|
286 |
+
|
287 |
+
if use_cache:
|
288 |
+
present = (key, value)
|
289 |
+
else:
|
290 |
+
present = None
|
291 |
+
|
292 |
+
if self.use_logn_attn and not self.training:
|
293 |
+
if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype:
|
294 |
+
self.logn_tensor = self.logn_tensor.to(query.device).type_as(query)
|
295 |
+
seq_start = key.size(1) - query.size(1)
|
296 |
+
seq_end = key.size(1)
|
297 |
+
logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :]
|
298 |
+
query = query * logn_tensor.expand_as(query)
|
299 |
+
|
300 |
+
query = query.permute(0, 2, 1, 3)
|
301 |
+
key = key.permute(0, 2, 1, 3)
|
302 |
+
value = value.permute(0, 2, 1, 3)
|
303 |
+
attn_output, attn_weight = self._attn(
|
304 |
+
query, key, value, registered_causal_mask, attention_mask, head_mask
|
305 |
+
)
|
306 |
+
context_layer = self._merge_heads(
|
307 |
+
attn_output, self.num_heads, self.head_dim
|
308 |
+
)
|
309 |
+
|
310 |
+
attn_output = self.c_proj(context_layer)
|
311 |
+
|
312 |
+
outputs = (attn_output, present)
|
313 |
+
if output_attentions:
|
314 |
+
outputs += (attn_weight,)
|
315 |
+
|
316 |
+
return outputs
|
317 |
+
|
318 |
+
|
319 |
+
class QWenMLP(nn.Module):
|
320 |
+
def __init__(self, config):
|
321 |
+
super().__init__()
|
322 |
+
self.w1 = nn.Linear(
|
323 |
+
config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
|
324 |
+
)
|
325 |
+
self.w2 = nn.Linear(
|
326 |
+
config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
|
327 |
+
)
|
328 |
+
ff_dim_in = config.intermediate_size // 2
|
329 |
+
self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)
|
330 |
+
|
331 |
+
def forward(self, hidden_states):
|
332 |
+
a1 = self.w1(hidden_states)
|
333 |
+
a2 = self.w2(hidden_states)
|
334 |
+
intermediate_parallel = a1 * F.silu(a2)
|
335 |
+
output = self.c_proj(intermediate_parallel)
|
336 |
+
return output
|
337 |
+
|
338 |
+
class QWenBlock(nn.Module):
|
339 |
+
def __init__(self, config):
|
340 |
+
super().__init__()
|
341 |
+
hidden_size = config.hidden_size
|
342 |
+
self.bf16 = config.bf16
|
343 |
+
|
344 |
+
self.ln_1 = RMSNorm(
|
345 |
+
hidden_size,
|
346 |
+
eps=config.layer_norm_epsilon,
|
347 |
+
)
|
348 |
+
self.attn = QWenAttention(config)
|
349 |
+
self.ln_2 = RMSNorm(
|
350 |
+
hidden_size,
|
351 |
+
eps=config.layer_norm_epsilon,
|
352 |
+
)
|
353 |
+
|
354 |
+
self.mlp = QWenMLP(config)
|
355 |
+
|
356 |
+
def forward(
|
357 |
+
self,
|
358 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]],
|
359 |
+
rotary_pos_emb: Optional[List[torch.Tensor]] = None,
|
360 |
+
registered_causal_mask: Optional[torch.Tensor] = None,
|
361 |
+
layer_past: Optional[Tuple[torch.Tensor]] = None,
|
362 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
363 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
364 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
365 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
366 |
+
use_cache: Optional[bool] = False,
|
367 |
+
output_attentions: Optional[bool] = False,
|
368 |
+
):
|
369 |
+
layernorm_output = self.ln_1(hidden_states)
|
370 |
+
|
371 |
+
attn_outputs = self.attn(
|
372 |
+
layernorm_output,
|
373 |
+
rotary_pos_emb,
|
374 |
+
registered_causal_mask=registered_causal_mask,
|
375 |
+
layer_past=layer_past,
|
376 |
+
attention_mask=attention_mask,
|
377 |
+
head_mask=head_mask,
|
378 |
+
use_cache=use_cache,
|
379 |
+
output_attentions=output_attentions,
|
380 |
+
)
|
381 |
+
attn_output = attn_outputs[0]
|
382 |
+
|
383 |
+
outputs = attn_outputs[1:]
|
384 |
+
|
385 |
+
residual = hidden_states
|
386 |
+
layernorm_input = attn_output + residual
|
387 |
+
|
388 |
+
layernorm_output = self.ln_2(layernorm_input)
|
389 |
+
|
390 |
+
residual = layernorm_input
|
391 |
+
mlp_output = self.mlp(layernorm_output)
|
392 |
+
hidden_states = residual + mlp_output
|
393 |
+
|
394 |
+
if use_cache:
|
395 |
+
outputs = (hidden_states,) + outputs
|
396 |
+
else:
|
397 |
+
outputs = (hidden_states,) + outputs[1:]
|
398 |
+
|
399 |
+
return outputs
|
400 |
+
|
401 |
+
|
402 |
+
class QWenPreTrainedModel(PreTrainedModel):
|
403 |
+
config_class = QWenConfig
|
404 |
+
base_model_prefix = "transformer"
|
405 |
+
is_parallelizable = False
|
406 |
+
supports_gradient_checkpointing = True
|
407 |
+
_no_split_modules = ["QWenBlock"]
|
408 |
+
|
409 |
+
def __init__(self, *inputs, **kwargs):
|
410 |
+
super().__init__(*inputs, **kwargs)
|
411 |
+
|
412 |
+
def _init_weights(self, module):
|
413 |
+
"""Initialize the weights."""
|
414 |
+
if isinstance(module, nn.Linear):
|
415 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
416 |
+
if module.bias is not None:
|
417 |
+
module.bias.data.zero_()
|
418 |
+
elif isinstance(module, nn.Embedding):
|
419 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
420 |
+
if module.padding_idx is not None:
|
421 |
+
module.weight.data[module.padding_idx].zero_()
|
422 |
+
elif isinstance(module, RMSNorm):
|
423 |
+
module.weight.data.fill_(1.0)
|
424 |
+
|
425 |
+
for name, p in module.named_parameters():
|
426 |
+
if name == "c_proj.weight":
|
427 |
+
p.data.normal_(
|
428 |
+
mean=0.0,
|
429 |
+
std=(
|
430 |
+
self.config.initializer_range
|
431 |
+
/ math.sqrt(2 * self.config.num_hidden_layers)
|
432 |
+
),
|
433 |
+
)
|
434 |
+
|
435 |
+
def _set_gradient_checkpointing(self, module, value=False):
|
436 |
+
if isinstance(module, QWenModel):
|
437 |
+
module.gradient_checkpointing = value
|
438 |
+
|
439 |
+
|
440 |
+
class QWenModel(QWenPreTrainedModel):
|
441 |
+
_keys_to_ignore_on_load_missing = ["attn.masked_bias"]
|
442 |
+
|
443 |
+
def __init__(self, config):
|
444 |
+
super().__init__(config)
|
445 |
+
self.vocab_size = config.vocab_size
|
446 |
+
self.num_hidden_layers = config.num_hidden_layers
|
447 |
+
self.embed_dim = config.hidden_size
|
448 |
+
|
449 |
+
self.gradient_checkpointing = False
|
450 |
+
self.use_dynamic_ntk = config.use_dynamic_ntk
|
451 |
+
self.seq_length = config.seq_length
|
452 |
+
|
453 |
+
self.wte = nn.Embedding(self.vocab_size, self.embed_dim)
|
454 |
+
|
455 |
+
self.drop = nn.Dropout(config.emb_dropout_prob)
|
456 |
+
|
457 |
+
if config.rotary_pct == 1.0:
|
458 |
+
self.rotary_ndims = None
|
459 |
+
else:
|
460 |
+
assert config.rotary_pct < 1
|
461 |
+
self.rotary_ndims = int(
|
462 |
+
config.kv_channels * config.rotary_pct
|
463 |
+
)
|
464 |
+
dim = (
|
465 |
+
self.rotary_ndims
|
466 |
+
if self.rotary_ndims is not None
|
467 |
+
else config.kv_channels
|
468 |
+
)
|
469 |
+
self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)
|
470 |
+
|
471 |
+
self.use_flash_attn = config.use_flash_attn
|
472 |
+
self.is_fp32 = not (config.bf16 or config.fp16)
|
473 |
+
self.registered_causal_mask = None
|
474 |
+
# if (
|
475 |
+
# self.use_flash_attn
|
476 |
+
# and flash_attn_unpadded_func is not None
|
477 |
+
# and not self.is_fp32
|
478 |
+
# ):
|
479 |
+
# self.registered_causal_mask = None
|
480 |
+
# else:
|
481 |
+
# max_positions = config.max_position_embeddings
|
482 |
+
# self.register_buffer(
|
483 |
+
# "registered_causal_mask",
|
484 |
+
# torch.tril(
|
485 |
+
# torch.ones((max_positions, max_positions), dtype=torch.bool)
|
486 |
+
# ).view(1, 1, max_positions, max_positions),
|
487 |
+
# persistent=False,
|
488 |
+
# )
|
489 |
+
|
490 |
+
self.h = nn.ModuleList(
|
491 |
+
[
|
492 |
+
QWenBlock(
|
493 |
+
config
|
494 |
+
)
|
495 |
+
for i in range(config.num_hidden_layers)
|
496 |
+
]
|
497 |
+
)
|
498 |
+
self.ln_f = RMSNorm(
|
499 |
+
self.embed_dim,
|
500 |
+
eps=config.layer_norm_epsilon,
|
501 |
+
)
|
502 |
+
|
503 |
+
self.visual = VisionTransformer(**config.visual)
|
504 |
+
|
505 |
+
self.post_init()
|
506 |
+
|
507 |
+
def get_input_embeddings(self):
|
508 |
+
return self.wte
|
509 |
+
|
510 |
+
def set_input_embeddings(self, new_embeddings):
|
511 |
+
self.wte = new_embeddings
|
512 |
+
|
513 |
+
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
|
514 |
+
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
|
515 |
+
# create causal mask
|
516 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
517 |
+
combined_attention_mask = None
|
518 |
+
if input_shape[-1] > 1:
|
519 |
+
combined_attention_mask = _make_causal_mask(
|
520 |
+
input_shape,
|
521 |
+
inputs_embeds.dtype,
|
522 |
+
device=inputs_embeds.device,
|
523 |
+
past_key_values_length=past_key_values_length,
|
524 |
+
)
|
525 |
+
|
526 |
+
if attention_mask is not None:
|
527 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
528 |
+
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
|
529 |
+
inputs_embeds.device
|
530 |
+
)
|
531 |
+
combined_attention_mask = (
|
532 |
+
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
|
533 |
+
)
|
534 |
+
|
535 |
+
return combined_attention_mask
|
536 |
+
|
537 |
+
|
538 |
+
def forward(
|
539 |
+
self,
|
540 |
+
input_ids: Optional[torch.LongTensor] = None,
|
541 |
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
|
542 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
543 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
544 |
+
position_ids: Optional[torch.LongTensor] = None,
|
545 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
546 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
547 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
548 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
549 |
+
use_cache: Optional[bool] = None,
|
550 |
+
output_attentions: Optional[bool] = None,
|
551 |
+
output_hidden_states: Optional[bool] = None,
|
552 |
+
return_dict: Optional[bool] = None,
|
553 |
+
):
|
554 |
+
if past_key_values is None and torch.any(input_ids == self.config.visual['image_start_id']):
|
555 |
+
bos_pos = torch.where(input_ids == self.config.visual['image_start_id'])
|
556 |
+
eos_pos = torch.where(input_ids == self.config.visual['image_start_id'] + 1)
|
557 |
+
assert (bos_pos[0] == eos_pos[0]).all()
|
558 |
+
img_pos = torch.stack((bos_pos[0], bos_pos[1], eos_pos[1]), dim=1)
|
559 |
+
images = []
|
560 |
+
for i, a, b in img_pos:
|
561 |
+
image = input_ids[i][a + 1 : b - 1].tolist()
|
562 |
+
image = image[ : image.index(self.config.visual['image_start_id'] + 2)]
|
563 |
+
images.append(bytes(image).decode('utf-8'))
|
564 |
+
|
565 |
+
images = self.visual.encode(images)
|
566 |
+
assert images.shape[0] == len(images)
|
567 |
+
fake_images = None
|
568 |
+
elif self.training:
|
569 |
+
fake_images=torch.zeros(1,3,224,224).to(
|
570 |
+
dtype=self.visual.conv1.weight.dtype, device=self.visual.conv1.weight.device)
|
571 |
+
images = self.visual(fake_images)
|
572 |
+
else:
|
573 |
+
fake_images = None
|
574 |
+
images = None
|
575 |
+
|
576 |
+
output_attentions = (
|
577 |
+
output_attentions
|
578 |
+
if output_attentions is not None
|
579 |
+
else self.config.output_attentions
|
580 |
+
)
|
581 |
+
output_hidden_states = (
|
582 |
+
output_hidden_states
|
583 |
+
if output_hidden_states is not None
|
584 |
+
else self.config.output_hidden_states
|
585 |
+
)
|
586 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
587 |
+
return_dict = (
|
588 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
589 |
+
)
|
590 |
+
|
591 |
+
if input_ids is not None and inputs_embeds is not None:
|
592 |
+
raise ValueError(
|
593 |
+
"You cannot specify both input_ids and inputs_embeds at the same time"
|
594 |
+
)
|
595 |
+
elif input_ids is not None:
|
596 |
+
input_shape = input_ids.size()
|
597 |
+
input_ids = input_ids.view(-1, input_shape[-1])
|
598 |
+
batch_size = input_ids.shape[0]
|
599 |
+
elif inputs_embeds is not None:
|
600 |
+
input_shape = inputs_embeds.size()[:-1]
|
601 |
+
batch_size = inputs_embeds.shape[0]
|
602 |
+
else:
|
603 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
604 |
+
|
605 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
606 |
+
|
607 |
+
if token_type_ids is not None:
|
608 |
+
token_type_ids = token_type_ids.view(-1, input_shape[-1])
|
609 |
+
if position_ids is not None:
|
610 |
+
position_ids = position_ids.view(-1, input_shape[-1])
|
611 |
+
|
612 |
+
if past_key_values is None:
|
613 |
+
past_length = 0
|
614 |
+
past_key_values = tuple([None] * len(self.h))
|
615 |
+
else:
|
616 |
+
past_length = past_key_values[0][0].size(-2)
|
617 |
+
|
618 |
+
if position_ids is None:
|
619 |
+
position_ids = torch.arange(
|
620 |
+
past_length,
|
621 |
+
input_shape[-1] + past_length,
|
622 |
+
dtype=torch.long,
|
623 |
+
device=device,
|
624 |
+
)
|
625 |
+
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
|
626 |
+
|
627 |
+
encoder_attention_mask = None
|
628 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
629 |
+
|
630 |
+
if inputs_embeds is None:
|
631 |
+
inputs_embeds = self.wte(input_ids)
|
632 |
+
|
633 |
+
if batch_size <= 0:
|
634 |
+
raise ValueError("batch_size has to be defined and > 0")
|
635 |
+
attention_mask = self._prepare_decoder_attention_mask(
|
636 |
+
attention_mask, input_shape, inputs_embeds, past_length
|
637 |
+
)
|
638 |
+
|
639 |
+
hidden_states = inputs_embeds
|
640 |
+
|
641 |
+
kv_seq_len = hidden_states.size()[1]
|
642 |
+
if past_key_values[0] is not None:
|
643 |
+
# past key values[0][0] shape: bs * seq_len * head_num * dim
|
644 |
+
kv_seq_len += past_key_values[0][0].shape[1]
|
645 |
+
if (
|
646 |
+
self.use_dynamic_ntk
|
647 |
+
and kv_seq_len == hidden_states.size()[1]
|
648 |
+
and not self.training
|
649 |
+
):
|
650 |
+
context_value = math.log(kv_seq_len / self.seq_length, 2) + 1
|
651 |
+
ntk_alpha = 2 ** math.ceil(context_value) - 1
|
652 |
+
ntk_alpha = max(ntk_alpha, 1)
|
653 |
+
else:
|
654 |
+
ntk_alpha = self.rotary_emb._ntk_alpha_cached
|
655 |
+
|
656 |
+
rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha)
|
657 |
+
for idx in range(len(rotary_pos_emb)):
|
658 |
+
rotary_pos_emb[idx] = rotary_pos_emb[idx].to(hidden_states.device)
|
659 |
+
|
660 |
+
hidden_states = self.drop(hidden_states).clone()
|
661 |
+
if fake_images is not None:
|
662 |
+
hidden_states = hidden_states + images.mean()*0
|
663 |
+
elif images is not None:
|
664 |
+
for idx, (i, a, b) in enumerate(img_pos):
|
665 |
+
hidden_states[i][a + 1 : b] = images[idx]
|
666 |
+
output_shape = input_shape + (hidden_states.size(-1),)
|
667 |
+
|
668 |
+
if self.gradient_checkpointing and self.training:
|
669 |
+
if use_cache:
|
670 |
+
logger.warning_once(
|
671 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
672 |
+
)
|
673 |
+
use_cache = False
|
674 |
+
|
675 |
+
presents = () if use_cache else None
|
676 |
+
all_self_attentions = () if output_attentions else None
|
677 |
+
all_hidden_states = () if output_hidden_states else None
|
678 |
+
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
|
679 |
+
|
680 |
+
if output_hidden_states:
|
681 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
682 |
+
|
683 |
+
if self.gradient_checkpointing and self.training:
|
684 |
+
|
685 |
+
def create_custom_forward(module):
|
686 |
+
def custom_forward(*inputs):
|
687 |
+
# None for past_key_value
|
688 |
+
return module(*inputs, use_cache, output_attentions)
|
689 |
+
|
690 |
+
return custom_forward
|
691 |
+
|
692 |
+
outputs = torch.utils.checkpoint.checkpoint(
|
693 |
+
create_custom_forward(block),
|
694 |
+
hidden_states,
|
695 |
+
rotary_pos_emb,
|
696 |
+
self.registered_causal_mask,
|
697 |
+
None,
|
698 |
+
attention_mask,
|
699 |
+
head_mask[i],
|
700 |
+
encoder_hidden_states,
|
701 |
+
encoder_attention_mask,
|
702 |
+
)
|
703 |
+
else:
|
704 |
+
outputs = block(
|
705 |
+
hidden_states,
|
706 |
+
layer_past=layer_past,
|
707 |
+
rotary_pos_emb=rotary_pos_emb,
|
708 |
+
registered_causal_mask=self.registered_causal_mask,
|
709 |
+
attention_mask=attention_mask,
|
710 |
+
head_mask=head_mask[i],
|
711 |
+
encoder_hidden_states=encoder_hidden_states,
|
712 |
+
encoder_attention_mask=encoder_attention_mask,
|
713 |
+
use_cache=use_cache,
|
714 |
+
output_attentions=output_attentions,
|
715 |
+
)
|
716 |
+
|
717 |
+
hidden_states = outputs[0]
|
718 |
+
if use_cache is True:
|
719 |
+
presents = presents + (outputs[1],)
|
720 |
+
|
721 |
+
if output_attentions:
|
722 |
+
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
|
723 |
+
|
724 |
+
hidden_states = self.ln_f(hidden_states)
|
725 |
+
hidden_states = hidden_states.view(output_shape)
|
726 |
+
# Add last hidden state
|
727 |
+
if output_hidden_states:
|
728 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
729 |
+
|
730 |
+
if not return_dict:
|
731 |
+
return tuple(
|
732 |
+
v for v in [hidden_states, presents, all_hidden_states] if v is not None
|
733 |
+
)
|
734 |
+
|
735 |
+
return BaseModelOutputWithPast(
|
736 |
+
last_hidden_state=hidden_states,
|
737 |
+
past_key_values=presents,
|
738 |
+
hidden_states=all_hidden_states,
|
739 |
+
attentions=all_self_attentions,
|
740 |
+
)
|
741 |
+
|
742 |
+
|
743 |
+
class QWenLMHeadModel(QWenPreTrainedModel):
|
744 |
+
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"]
|
745 |
+
_keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"]
|
746 |
+
|
747 |
+
def __init__(self, config):
|
748 |
+
super().__init__(config)
|
749 |
+
assert (
|
750 |
+
config.bf16 + config.fp16 + config.fp32 <= 1
|
751 |
+
), "Only one of \"bf16\", \"fp16\", \"fp32\" can be true"
|
752 |
+
|
753 |
+
autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0
|
754 |
+
|
755 |
+
if autoset_precision:
|
756 |
+
if SUPPORT_BF16:
|
757 |
+
logger.warn(
|
758 |
+
"The model is automatically converting to bf16 for faster inference. "
|
759 |
+
"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
|
760 |
+
)
|
761 |
+
config.bf16 = True
|
762 |
+
elif SUPPORT_FP16:
|
763 |
+
logger.warn(
|
764 |
+
"The model is automatically converting to fp16 for faster inference. "
|
765 |
+
"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
|
766 |
+
)
|
767 |
+
config.fp16 = True
|
768 |
+
else:
|
769 |
+
config.fp32 = True
|
770 |
+
|
771 |
+
if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
|
772 |
+
logger.warn("Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".")
|
773 |
+
if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
|
774 |
+
logger.warn("Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster")
|
775 |
+
if config.fp32:
|
776 |
+
if SUPPORT_BF16:
|
777 |
+
logger.warn("Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
|
778 |
+
elif SUPPORT_FP16:
|
779 |
+
logger.warn("Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")
|
780 |
+
|
781 |
+
        self.transformer = QWenModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        if config.bf16:
            self.transformer.bfloat16()
            self.lm_head.bfloat16()
        if config.fp16:
            self.transformer.half()
            self.lm_head.half()
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
    ):
        token_type_ids = kwargs.get("token_type_ids", None)
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None

        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
            }
        )
        return model_inputs

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
            )

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:

        return tuple(
            tuple(
                past_state.index_select(0, beam_idx.to(past_state.device))
                for past_state in layer_past
            )
            for layer_past in past_key_values
        )

    def chat(
        self,
        tokenizer: PreTrainedTokenizer,
        query: str,
        history: Optional[HistoryType],
        system: str = "You are a helpful assistant.",
        append_history: bool = True,
        stream: Optional[bool] = _SENTINEL,
        stop_words_ids: Optional[List[List[int]]] = None,
        generation_config: Optional[GenerationConfig] = None,
        **kwargs,
    ) -> Tuple[str, HistoryType]:
        generation_config = generation_config if generation_config is not None else self.generation_config

        assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT
        assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
        if history is None:
            history = []
        if stop_words_ids is None:
            stop_words_ids = []

        max_window_size = kwargs.get('max_window_size', None)
        if max_window_size is None:
            max_window_size = generation_config.max_window_size
        raw_text, context_tokens = make_context(
            tokenizer,
            query,
            history=history,
            system=system,
            max_window_size=max_window_size,
            chat_format=generation_config.chat_format,
        )

        stop_words_ids.extend(get_stop_words_ids(
            generation_config.chat_format, tokenizer
        ))
        input_ids = torch.tensor([context_tokens]).to(self.device)
        outputs = self.generate(
            input_ids,
            stop_words_ids=stop_words_ids,
            return_dict_in_generate=False,
            generation_config=generation_config,
            **kwargs,
        )

        response = decode_tokens(
            outputs[0],
            tokenizer,
            raw_text_len=len(raw_text),
            context_length=len(context_tokens),
            chat_format=generation_config.chat_format,
            verbose=False,
            errors='replace'
        )

        if append_history:
            history.append((query, response))

        return response, history

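(Illustrative usage sketch, not part of modeling_qwen.py: it assumes this folder is used as a local checkpoint path, that accelerate is installed for device_map="auto", and the image URL is a placeholder.)

# Minimal call of the chat() method defined above, following the repo's chat interface.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen-VL-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen-VL-Chat", device_map="auto", trust_remote_code=True
).eval()

# from_list_format (defined in tokenization_qwen.py) builds the image+text query string.
query = tokenizer.from_list_format([
    {"image": "https://example.com/demo.jpeg"},  # placeholder image URL
    {"text": "What is in this picture?"},
])
response, history = model.chat(tokenizer, query=query, history=None)
print(response)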
    def chat_stream(
        self,
        tokenizer: PreTrainedTokenizer,
        query: str,
        history: Optional[HistoryType],
        system: str = "You are a helpful assistant.",
        stop_words_ids: Optional[List[List[int]]] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        generation_config: Optional[GenerationConfig] = None,
        **kwargs,
    ) -> Generator[str, Any, None]:
        generation_config = generation_config if generation_config is not None else self.generation_config
        assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
        if history is None:
            history = []
        if stop_words_ids is None:
            stop_words_ids = []

        max_window_size = kwargs.get('max_window_size', None)
        if max_window_size is None:
            max_window_size = generation_config.max_window_size
        raw_text, context_tokens = make_context(
            tokenizer,
            query,
            history=history,
            system=system,
            max_window_size=max_window_size,
            chat_format=generation_config.chat_format,
        )

        stop_words_ids.extend(get_stop_words_ids(
            generation_config.chat_format, tokenizer
        ))
        if stop_words_ids is not None:
            stop_words_logits_processor = StopWordsLogitsProcessor(
                stop_words_ids=stop_words_ids,
                eos_token_id=generation_config.eos_token_id,
            )
            if logits_processor is None:
                logits_processor = LogitsProcessorList([stop_words_logits_processor])
            else:
                logits_processor.append(stop_words_logits_processor)
        input_ids = torch.tensor([context_tokens]).to(self.device)

        from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
        self.__class__.generate_stream = NewGenerationMixin.generate
        self.__class__.sample_stream = NewGenerationMixin.sample_stream
        stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)

        def stream_generator():
            outputs = []
            for token in self.generate_stream(
                    input_ids,
                    return_dict_in_generate=False,
                    generation_config=stream_config,
                    logits_processor=logits_processor,
                    seed=-1,
                    **kwargs):
                outputs.append(token.item())
                yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore', keep_image_special=True)

        return stream_generator()

    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        prefix_allowed_tokens_fn: Optional[
            Callable[[int, torch.Tensor], List[int]]
        ] = None,
        synced_gpus: Optional[bool] = None,
        assistant_model: Optional["PreTrainedModel"] = None,
        streamer: Optional["BaseStreamer"] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        generation_config = generation_config if generation_config is not None else self.generation_config

        # Process stop_words_ids.
        stop_words_ids = kwargs.pop("stop_words_ids", None)
        if stop_words_ids is None and generation_config is not None:
            stop_words_ids = getattr(generation_config, "stop_words_ids", None)
        if stop_words_ids is None:
            stop_words_ids = getattr(generation_config, "stop_words_ids", None)

        if stop_words_ids is not None:
            stop_words_logits_processor = StopWordsLogitsProcessor(
                stop_words_ids=stop_words_ids,
                eos_token_id=generation_config.eos_token_id,
            )
            if logits_processor is None:
                logits_processor = LogitsProcessorList([stop_words_logits_processor])
            else:
                logits_processor.append(stop_words_logits_processor)

        return super().generate(
            inputs,
            generation_config=generation_config,
            logits_processor=logits_processor,
            stopping_criteria=stopping_criteria,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            synced_gpus=synced_gpus,
            assistant_model=assistant_model,
            streamer=streamer,
            **kwargs,
        )

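(Illustrative sketch, not part of the file: generate() above only turns stop words into a StopWordsLogitsProcessor before delegating to super().generate(). The sketch assumes qwen_generation_utils.py from this folder is importable and that the tokenizer and generation config can be loaded from the local path "Qwen-VL-Chat".)

from transformers import AutoTokenizer, GenerationConfig, LogitsProcessorList
from qwen_generation_utils import StopWordsLogitsProcessor, get_stop_words_ids

tokenizer = AutoTokenizer.from_pretrained("Qwen-VL-Chat", trust_remote_code=True)
gen_cfg = GenerationConfig.from_pretrained("Qwen-VL-Chat", trust_remote_code=True)

stop_words_ids = get_stop_words_ids(gen_cfg.chat_format, tokenizer)  # chatml stop tokens
logits_processor = LogitsProcessorList(
    [StopWordsLogitsProcessor(stop_words_ids=stop_words_ids, eos_token_id=gen_cfg.eos_token_id)]
)
# model.generate(input_ids, logits_processor=logits_processor, ...) then stops on those ids.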
class RotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, base=10000):
        super().__init__()
        self.dim = dim
        self.base = base
        self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        if importlib.util.find_spec("einops") is None:
            raise RuntimeError("einops is required for Rotary Embedding")

        self._rotary_pos_emb_cache = None
        self._seq_len_cached = 0
        self._ntk_alpha_cached = 1.0

    def update_rotary_pos_emb_cache(self, max_seq_len, offset=0, ntk_alpha=1.0):
        seqlen = max_seq_len + offset
        if seqlen > self._seq_len_cached or ntk_alpha != self._ntk_alpha_cached:
            base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
            self.inv_freq = 1.0 / (
                base
                ** (
                    torch.arange(0, self.dim, 2, device=self.inv_freq.device).float()
                    / self.dim
                )
            )
            self._seq_len_cached = max(2 * seqlen, 16)
            self._ntk_alpha_cached = ntk_alpha
            seq = torch.arange(self._seq_len_cached, device=self.inv_freq.device)
            freqs = torch.outer(seq.type_as(self.inv_freq), self.inv_freq)

            emb = torch.cat((freqs, freqs), dim=-1)
            from einops import rearrange

            emb = rearrange(emb, "n d -> 1 n 1 d")

            cos, sin = emb.cos(), emb.sin()
            self._rotary_pos_emb_cache = [cos, sin]

    def forward(self, max_seq_len, offset=0, ntk_alpha=1.0):
        self.update_rotary_pos_emb_cache(max_seq_len, offset, ntk_alpha)
        cos, sin = self._rotary_pos_emb_cache
        return [cos[:, offset : offset + max_seq_len], sin[:, offset : offset + max_seq_len]]


def _rotate_half(x):
    from einops import rearrange

    x = rearrange(x, "... (j d) -> ... j d", j=2)
    x1, x2 = x.unbind(dim=-2)
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(t, freqs):
    cos, sin = freqs
    if apply_rotary_emb_func is not None and t.is_cuda:
        t_ = t.float()
        cos = cos.squeeze(0).squeeze(1)[:, : cos.shape[-1] // 2]
        sin = sin.squeeze(0).squeeze(1)[:, : sin.shape[-1] // 2]
        output = apply_rotary_emb_func(t_, cos, sin).type_as(t)
        return output
    else:
        rot_dim = freqs[0].shape[-1]
        cos, sin = freqs
        t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:]
        t_ = t_.float()
        t_pass_ = t_pass_.float()
        t_ = (t_ * cos) + (_rotate_half(t_) * sin)
        return torch.cat((t_, t_pass_), dim=-1).type_as(t)

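(Illustrative sketch, not part of the file: it exercises the pure-PyTorch fallback branch of apply_rotary_pos_emb above. Assumptions: modeling_qwen.py is importable from the working directory, einops is installed, and the batch × seq × heads × head_dim tensor layout is the one the attention code passes in.)

import torch
from modeling_qwen import RotaryEmbedding, apply_rotary_pos_emb

rotary = RotaryEmbedding(dim=128)       # per-head dimension
cos, sin = rotary(32)                   # each shaped (1, 32, 1, 128)

q = torch.randn(2, 32, 32, 128)         # assumed (batch, seq_len, n_heads, head_dim) layout
q_rot = apply_rotary_pos_emb(q, [cos, sin])
print(q_rot.shape)                      # torch.Size([2, 32, 32, 128]); CPU tensors take the else-branch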
class RMSNorm(torch.nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        if rms_norm is not None and x.is_cuda:
            return rms_norm(x, self.weight, self.eps)
        else:
            output = self._norm(x.float()).type_as(x)
            return output * self.weight
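(Illustrative check, not part of the file: on CPU the fused rms_norm kernel is skipped, so RMSNorm above should match the usual x * rsqrt(mean(x^2) + eps) * weight formulation. Assumes modeling_qwen.py is importable.)

import torch
from modeling_qwen import RMSNorm

x = torch.randn(2, 5, 4096)
norm = RMSNorm(4096, eps=1e-6)

manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6) * norm.weight
assert torch.allclose(norm(x), manual, atol=1e-5)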
Qwen-VL-Chat/pytorch_model-00001-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d63e4b4238be3897d3b44d0f604422fc07dfceaf971ebde7adadd7be7a2a35bb
size 1964070255
Qwen-VL-Chat/pytorch_model-00002-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a14062dc6abcccda58a4d4ab346b0990a7fc6066b571eddfd96b92b0024c32be
size 1933791629
Qwen-VL-Chat/pytorch_model-00003-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:72051aea446b6961d937ff9586adb5aea2ba435d490daa31d1332bd6c7be682f
size 1933791629
Qwen-VL-Chat/pytorch_model-00004-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:01292b3d0c48b2141e247625699f5e5889655ffc4c7f2bcf92f3692c24b21234
size 1990406267
Qwen-VL-Chat/pytorch_model-00005-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:75544b895cfb4ac8f59c5bf27a17c355a6e380ec0b83ee27298cc2463abb9914
size 1923281019
Qwen-VL-Chat/pytorch_model-00006-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f31964509ef34c82ab95eaac98121eeac193a1828a53abc2801b75b9fced230
size 1933783163
Qwen-VL-Chat/pytorch_model-00007-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1dc41b294fe96db7800587308d9235970490e5862d83ee50407be61085589e24
size 1933791693
Qwen-VL-Chat/pytorch_model-00008-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:164383ddd886d8e18de072be7632c258e6127a885e73cc761056ade7ff62abca
size 1975364797
Qwen-VL-Chat/pytorch_model-00009-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:930b17b36aa050b7ed1853d65f8f2e72a9e944916fdb3f002a7f6d6963ec91be
size 1994920659
Qwen-VL-Chat/pytorch_model-00010-of-00010.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f6ecfbabe9f0868fa2fca4cf726d5d956850e516a12a95aea63b37abc2c27d6
size 1730967567
Qwen-VL-Chat/pytorch_model.bin.index.json
ADDED
@@ -0,0 +1,860 @@
{
  "metadata": {
    "total_size": 19313870336
  },
  "weight_map": {
    "lm_head.weight": "pytorch_model-00010-of-00010.bin",
    "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00010.bin",
    "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.0.mlp.w1.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.0.mlp.w2.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00010.bin",
    "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.1.mlp.w1.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.1.mlp.w2.weight": "pytorch_model-00001-of-00010.bin",
    "transformer.h.10.attn.c_attn.bias": "pytorch_model-00003-of-00010.bin",
    "transformer.h.10.attn.c_attn.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.10.attn.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.10.ln_1.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.10.ln_2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.10.mlp.w1.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.10.mlp.w2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.11.attn.c_attn.bias": "pytorch_model-00003-of-00010.bin",
    "transformer.h.11.attn.c_attn.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.11.attn.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.11.ln_1.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.11.ln_2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.11.mlp.w1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.11.mlp.w2.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.12.attn.c_attn.bias": "pytorch_model-00004-of-00010.bin",
    "transformer.h.12.attn.c_attn.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.12.attn.c_proj.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.12.ln_1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.12.ln_2.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.12.mlp.c_proj.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.12.mlp.w1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.12.mlp.w2.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.13.attn.c_attn.bias": "pytorch_model-00004-of-00010.bin",
    "transformer.h.13.attn.c_attn.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.13.attn.c_proj.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.13.ln_1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.13.ln_2.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.13.mlp.c_proj.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.13.mlp.w1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.13.mlp.w2.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.14.attn.c_attn.bias": "pytorch_model-00004-of-00010.bin",
    "transformer.h.14.attn.c_attn.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.14.attn.c_proj.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.14.ln_1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.14.ln_2.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.14.mlp.c_proj.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.14.mlp.w1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.14.mlp.w2.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.15.attn.c_attn.bias": "pytorch_model-00004-of-00010.bin",
    "transformer.h.15.attn.c_attn.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.15.attn.c_proj.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.15.ln_1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.15.ln_2.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.15.mlp.c_proj.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.15.mlp.w1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.15.mlp.w2.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.16.attn.c_attn.bias": "pytorch_model-00004-of-00010.bin",
    "transformer.h.16.attn.c_attn.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.16.attn.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.16.ln_1.weight": "pytorch_model-00004-of-00010.bin",
    "transformer.h.16.ln_2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.16.mlp.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.16.mlp.w1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.16.mlp.w2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.17.attn.c_attn.bias": "pytorch_model-00005-of-00010.bin",
    "transformer.h.17.attn.c_attn.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.17.attn.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.17.ln_1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.17.ln_2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.17.mlp.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.17.mlp.w1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.17.mlp.w2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.18.attn.c_attn.bias": "pytorch_model-00005-of-00010.bin",
    "transformer.h.18.attn.c_attn.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.18.attn.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.18.ln_1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.18.ln_2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.18.mlp.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.18.mlp.w1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.18.mlp.w2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.19.attn.c_attn.bias": "pytorch_model-00005-of-00010.bin",
    "transformer.h.19.attn.c_attn.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.19.attn.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.19.ln_1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.19.ln_2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.19.mlp.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.19.mlp.w1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.19.mlp.w2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.2.attn.c_attn.bias": "pytorch_model-00002-of-00010.bin",
    "transformer.h.2.attn.c_attn.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.2.attn.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.2.ln_1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.2.ln_2.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.2.mlp.w1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.2.mlp.w2.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.20.attn.c_attn.bias": "pytorch_model-00005-of-00010.bin",
    "transformer.h.20.attn.c_attn.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.20.attn.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.20.ln_1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.20.ln_2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.20.mlp.c_proj.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.20.mlp.w1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.20.mlp.w2.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.21.attn.c_attn.bias": "pytorch_model-00006-of-00010.bin",
    "transformer.h.21.attn.c_attn.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.21.attn.c_proj.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.21.ln_1.weight": "pytorch_model-00005-of-00010.bin",
    "transformer.h.21.ln_2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.21.mlp.c_proj.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.21.mlp.w1.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.21.mlp.w2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.22.attn.c_attn.bias": "pytorch_model-00006-of-00010.bin",
    "transformer.h.22.attn.c_attn.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.22.attn.c_proj.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.22.ln_1.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.22.ln_2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.22.mlp.c_proj.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.22.mlp.w1.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.22.mlp.w2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.23.attn.c_attn.bias": "pytorch_model-00006-of-00010.bin",
    "transformer.h.23.attn.c_attn.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.23.attn.c_proj.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.23.ln_1.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.23.ln_2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.23.mlp.c_proj.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.23.mlp.w1.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.23.mlp.w2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.24.attn.c_attn.bias": "pytorch_model-00006-of-00010.bin",
    "transformer.h.24.attn.c_attn.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.24.attn.c_proj.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.24.ln_1.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.24.ln_2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.24.mlp.c_proj.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.24.mlp.w1.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.24.mlp.w2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.25.attn.c_attn.bias": "pytorch_model-00006-of-00010.bin",
    "transformer.h.25.attn.c_attn.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.25.attn.c_proj.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.25.ln_1.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.25.ln_2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.25.mlp.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.25.mlp.w1.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.25.mlp.w2.weight": "pytorch_model-00006-of-00010.bin",
    "transformer.h.26.attn.c_attn.bias": "pytorch_model-00007-of-00010.bin",
    "transformer.h.26.attn.c_attn.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.26.attn.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.26.ln_1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.26.ln_2.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.26.mlp.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.26.mlp.w1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.26.mlp.w2.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.27.attn.c_attn.bias": "pytorch_model-00007-of-00010.bin",
    "transformer.h.27.attn.c_attn.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.27.attn.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.27.ln_1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.27.ln_2.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.27.mlp.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.27.mlp.w1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.27.mlp.w2.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.28.attn.c_attn.bias": "pytorch_model-00007-of-00010.bin",
    "transformer.h.28.attn.c_attn.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.28.attn.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.28.ln_1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.28.ln_2.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.28.mlp.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.28.mlp.w1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.28.mlp.w2.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.29.attn.c_attn.bias": "pytorch_model-00007-of-00010.bin",
    "transformer.h.29.attn.c_attn.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.29.attn.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.29.ln_1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.29.ln_2.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.29.mlp.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.29.mlp.w1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.29.mlp.w2.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.3.attn.c_attn.bias": "pytorch_model-00002-of-00010.bin",
    "transformer.h.3.attn.c_attn.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.3.attn.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.3.ln_1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.3.ln_2.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.3.mlp.w1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.3.mlp.w2.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.30.attn.c_attn.bias": "pytorch_model-00007-of-00010.bin",
    "transformer.h.30.attn.c_attn.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.30.attn.c_proj.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.30.ln_1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.30.ln_2.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.30.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.h.30.mlp.w1.weight": "pytorch_model-00007-of-00010.bin",
    "transformer.h.30.mlp.w2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.h.31.attn.c_attn.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.h.31.attn.c_attn.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.h.31.attn.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.h.31.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.h.31.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.h.31.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.h.31.mlp.w1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.h.31.mlp.w2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.h.4.attn.c_attn.bias": "pytorch_model-00002-of-00010.bin",
    "transformer.h.4.attn.c_attn.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.4.attn.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.4.ln_1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.4.ln_2.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.4.mlp.w1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.4.mlp.w2.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.5.attn.c_attn.bias": "pytorch_model-00002-of-00010.bin",
    "transformer.h.5.attn.c_attn.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.5.attn.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.5.ln_1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.5.ln_2.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.5.mlp.w1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.5.mlp.w2.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.6.attn.c_attn.bias": "pytorch_model-00002-of-00010.bin",
    "transformer.h.6.attn.c_attn.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.6.attn.c_proj.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.6.ln_1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.6.ln_2.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.6.mlp.w1.weight": "pytorch_model-00002-of-00010.bin",
    "transformer.h.6.mlp.w2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.7.attn.c_attn.bias": "pytorch_model-00003-of-00010.bin",
    "transformer.h.7.attn.c_attn.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.7.attn.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.7.ln_1.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.7.ln_2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.7.mlp.w1.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.7.mlp.w2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.8.attn.c_attn.bias": "pytorch_model-00003-of-00010.bin",
    "transformer.h.8.attn.c_attn.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.8.attn.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.8.ln_1.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.8.ln_2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.8.mlp.w1.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.8.mlp.w2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.9.attn.c_attn.bias": "pytorch_model-00003-of-00010.bin",
    "transformer.h.9.attn.c_attn.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.9.attn.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.9.ln_1.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.9.ln_2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.9.mlp.w1.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.h.9.mlp.w2.weight": "pytorch_model-00003-of-00010.bin",
    "transformer.ln_f.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.attn_pool.attn.in_proj_bias": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.attn.in_proj_weight": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.attn.out_proj.bias": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.attn.out_proj.weight": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.kv_proj.weight": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.ln_kv.bias": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.ln_kv.weight": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.ln_q.bias": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.ln_q.weight": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.pos_embed": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.attn_pool.query": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.conv1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.ln_post.bias": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.ln_post.weight": "pytorch_model-00010-of-00010.bin",
    "transformer.visual.ln_pre.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.ln_pre.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.positional_embedding": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.proj": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.0.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.1.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.10.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.11.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.12.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.13.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.14.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.15.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.16.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.17.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.ln_1.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.ln_1.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.ln_2.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.ln_2.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.18.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.ln_1.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.ln_1.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.ln_2.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.ln_2.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.19.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.ln_1.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.ln_1.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.ln_2.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.ln_2.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.2.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.ln_1.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.ln_1.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.ln_2.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.ln_2.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.20.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.ln_1.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.ln_1.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.ln_2.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.ln_2.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.21.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.22.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.22.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.22.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.22.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.22.ln_1.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.22.ln_1.weight": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.22.ln_2.bias": "pytorch_model-00009-of-00010.bin",
    "transformer.visual.transformer.resblocks.22.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
470 |
+
"transformer.visual.transformer.resblocks.22.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
471 |
+
"transformer.visual.transformer.resblocks.22.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
472 |
+
"transformer.visual.transformer.resblocks.22.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
473 |
+
"transformer.visual.transformer.resblocks.22.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
474 |
+
"transformer.visual.transformer.resblocks.23.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
475 |
+
"transformer.visual.transformer.resblocks.23.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
476 |
+
"transformer.visual.transformer.resblocks.23.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
477 |
+
"transformer.visual.transformer.resblocks.23.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
478 |
+
"transformer.visual.transformer.resblocks.23.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
479 |
+
"transformer.visual.transformer.resblocks.23.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
480 |
+
"transformer.visual.transformer.resblocks.23.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
481 |
+
"transformer.visual.transformer.resblocks.23.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
482 |
+
"transformer.visual.transformer.resblocks.23.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
483 |
+
"transformer.visual.transformer.resblocks.23.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
484 |
+
"transformer.visual.transformer.resblocks.23.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
485 |
+
"transformer.visual.transformer.resblocks.23.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
486 |
+
"transformer.visual.transformer.resblocks.24.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
487 |
+
"transformer.visual.transformer.resblocks.24.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
488 |
+
"transformer.visual.transformer.resblocks.24.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
489 |
+
"transformer.visual.transformer.resblocks.24.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
490 |
+
"transformer.visual.transformer.resblocks.24.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
491 |
+
"transformer.visual.transformer.resblocks.24.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
492 |
+
"transformer.visual.transformer.resblocks.24.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
493 |
+
"transformer.visual.transformer.resblocks.24.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
494 |
+
"transformer.visual.transformer.resblocks.24.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
495 |
+
"transformer.visual.transformer.resblocks.24.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
496 |
+
"transformer.visual.transformer.resblocks.24.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
497 |
+
"transformer.visual.transformer.resblocks.24.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
498 |
+
"transformer.visual.transformer.resblocks.25.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
499 |
+
"transformer.visual.transformer.resblocks.25.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
500 |
+
"transformer.visual.transformer.resblocks.25.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
501 |
+
"transformer.visual.transformer.resblocks.25.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
502 |
+
"transformer.visual.transformer.resblocks.25.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
503 |
+
"transformer.visual.transformer.resblocks.25.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
504 |
+
"transformer.visual.transformer.resblocks.25.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
505 |
+
"transformer.visual.transformer.resblocks.25.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
506 |
+
"transformer.visual.transformer.resblocks.25.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
507 |
+
"transformer.visual.transformer.resblocks.25.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
508 |
+
"transformer.visual.transformer.resblocks.25.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
509 |
+
"transformer.visual.transformer.resblocks.25.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
510 |
+
"transformer.visual.transformer.resblocks.26.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
511 |
+
"transformer.visual.transformer.resblocks.26.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
512 |
+
"transformer.visual.transformer.resblocks.26.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
513 |
+
"transformer.visual.transformer.resblocks.26.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
514 |
+
"transformer.visual.transformer.resblocks.26.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
515 |
+
"transformer.visual.transformer.resblocks.26.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
516 |
+
"transformer.visual.transformer.resblocks.26.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
517 |
+
"transformer.visual.transformer.resblocks.26.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
518 |
+
"transformer.visual.transformer.resblocks.26.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
519 |
+
"transformer.visual.transformer.resblocks.26.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
520 |
+
"transformer.visual.transformer.resblocks.26.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
521 |
+
"transformer.visual.transformer.resblocks.26.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
522 |
+
"transformer.visual.transformer.resblocks.27.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
523 |
+
"transformer.visual.transformer.resblocks.27.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
524 |
+
"transformer.visual.transformer.resblocks.27.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
525 |
+
"transformer.visual.transformer.resblocks.27.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
526 |
+
"transformer.visual.transformer.resblocks.27.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
527 |
+
"transformer.visual.transformer.resblocks.27.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
528 |
+
"transformer.visual.transformer.resblocks.27.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
529 |
+
"transformer.visual.transformer.resblocks.27.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
530 |
+
"transformer.visual.transformer.resblocks.27.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
531 |
+
"transformer.visual.transformer.resblocks.27.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
532 |
+
"transformer.visual.transformer.resblocks.27.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
533 |
+
"transformer.visual.transformer.resblocks.27.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
534 |
+
"transformer.visual.transformer.resblocks.28.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
535 |
+
"transformer.visual.transformer.resblocks.28.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
536 |
+
"transformer.visual.transformer.resblocks.28.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
537 |
+
"transformer.visual.transformer.resblocks.28.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
538 |
+
"transformer.visual.transformer.resblocks.28.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
539 |
+
"transformer.visual.transformer.resblocks.28.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
540 |
+
"transformer.visual.transformer.resblocks.28.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
541 |
+
"transformer.visual.transformer.resblocks.28.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
542 |
+
"transformer.visual.transformer.resblocks.28.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
543 |
+
"transformer.visual.transformer.resblocks.28.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
544 |
+
"transformer.visual.transformer.resblocks.28.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
545 |
+
"transformer.visual.transformer.resblocks.28.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
546 |
+
"transformer.visual.transformer.resblocks.29.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
547 |
+
"transformer.visual.transformer.resblocks.29.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
548 |
+
"transformer.visual.transformer.resblocks.29.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
549 |
+
"transformer.visual.transformer.resblocks.29.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
550 |
+
"transformer.visual.transformer.resblocks.29.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
551 |
+
"transformer.visual.transformer.resblocks.29.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
552 |
+
"transformer.visual.transformer.resblocks.29.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
553 |
+
"transformer.visual.transformer.resblocks.29.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
554 |
+
"transformer.visual.transformer.resblocks.29.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
555 |
+
"transformer.visual.transformer.resblocks.29.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
556 |
+
"transformer.visual.transformer.resblocks.29.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
557 |
+
"transformer.visual.transformer.resblocks.29.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
558 |
+
"transformer.visual.transformer.resblocks.3.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
|
559 |
+
"transformer.visual.transformer.resblocks.3.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
|
560 |
+
"transformer.visual.transformer.resblocks.3.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
|
561 |
+
"transformer.visual.transformer.resblocks.3.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
|
562 |
+
"transformer.visual.transformer.resblocks.3.ln_1.bias": "pytorch_model-00008-of-00010.bin",
|
563 |
+
"transformer.visual.transformer.resblocks.3.ln_1.weight": "pytorch_model-00008-of-00010.bin",
|
564 |
+
"transformer.visual.transformer.resblocks.3.ln_2.bias": "pytorch_model-00008-of-00010.bin",
|
565 |
+
"transformer.visual.transformer.resblocks.3.ln_2.weight": "pytorch_model-00008-of-00010.bin",
|
566 |
+
"transformer.visual.transformer.resblocks.3.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
|
567 |
+
"transformer.visual.transformer.resblocks.3.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
|
568 |
+
"transformer.visual.transformer.resblocks.3.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
|
569 |
+
"transformer.visual.transformer.resblocks.3.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
|
570 |
+
"transformer.visual.transformer.resblocks.30.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
571 |
+
"transformer.visual.transformer.resblocks.30.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
572 |
+
"transformer.visual.transformer.resblocks.30.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
573 |
+
"transformer.visual.transformer.resblocks.30.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
574 |
+
"transformer.visual.transformer.resblocks.30.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
575 |
+
"transformer.visual.transformer.resblocks.30.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
576 |
+
"transformer.visual.transformer.resblocks.30.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
577 |
+
"transformer.visual.transformer.resblocks.30.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
578 |
+
"transformer.visual.transformer.resblocks.30.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
579 |
+
"transformer.visual.transformer.resblocks.30.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
580 |
+
"transformer.visual.transformer.resblocks.30.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
581 |
+
"transformer.visual.transformer.resblocks.30.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
582 |
+
"transformer.visual.transformer.resblocks.31.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
583 |
+
"transformer.visual.transformer.resblocks.31.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
584 |
+
"transformer.visual.transformer.resblocks.31.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
585 |
+
"transformer.visual.transformer.resblocks.31.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
586 |
+
"transformer.visual.transformer.resblocks.31.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
587 |
+
"transformer.visual.transformer.resblocks.31.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
588 |
+
"transformer.visual.transformer.resblocks.31.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
589 |
+
"transformer.visual.transformer.resblocks.31.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
590 |
+
"transformer.visual.transformer.resblocks.31.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
591 |
+
"transformer.visual.transformer.resblocks.31.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
592 |
+
"transformer.visual.transformer.resblocks.31.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
593 |
+
"transformer.visual.transformer.resblocks.31.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
594 |
+
"transformer.visual.transformer.resblocks.32.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
595 |
+
"transformer.visual.transformer.resblocks.32.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
596 |
+
"transformer.visual.transformer.resblocks.32.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
597 |
+
"transformer.visual.transformer.resblocks.32.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
598 |
+
"transformer.visual.transformer.resblocks.32.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
599 |
+
"transformer.visual.transformer.resblocks.32.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
600 |
+
"transformer.visual.transformer.resblocks.32.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
601 |
+
"transformer.visual.transformer.resblocks.32.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
602 |
+
"transformer.visual.transformer.resblocks.32.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
603 |
+
"transformer.visual.transformer.resblocks.32.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
604 |
+
"transformer.visual.transformer.resblocks.32.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
605 |
+
"transformer.visual.transformer.resblocks.32.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
606 |
+
"transformer.visual.transformer.resblocks.33.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
607 |
+
"transformer.visual.transformer.resblocks.33.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
608 |
+
"transformer.visual.transformer.resblocks.33.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
609 |
+
"transformer.visual.transformer.resblocks.33.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
610 |
+
"transformer.visual.transformer.resblocks.33.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
611 |
+
"transformer.visual.transformer.resblocks.33.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
612 |
+
"transformer.visual.transformer.resblocks.33.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
613 |
+
"transformer.visual.transformer.resblocks.33.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
614 |
+
"transformer.visual.transformer.resblocks.33.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
615 |
+
"transformer.visual.transformer.resblocks.33.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
616 |
+
"transformer.visual.transformer.resblocks.33.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
617 |
+
"transformer.visual.transformer.resblocks.33.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
618 |
+
"transformer.visual.transformer.resblocks.34.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
619 |
+
"transformer.visual.transformer.resblocks.34.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
620 |
+
"transformer.visual.transformer.resblocks.34.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
621 |
+
"transformer.visual.transformer.resblocks.34.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
622 |
+
"transformer.visual.transformer.resblocks.34.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
623 |
+
"transformer.visual.transformer.resblocks.34.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
624 |
+
"transformer.visual.transformer.resblocks.34.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
625 |
+
"transformer.visual.transformer.resblocks.34.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
626 |
+
"transformer.visual.transformer.resblocks.34.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
627 |
+
"transformer.visual.transformer.resblocks.34.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
628 |
+
"transformer.visual.transformer.resblocks.34.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
629 |
+
"transformer.visual.transformer.resblocks.34.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
630 |
+
"transformer.visual.transformer.resblocks.35.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
631 |
+
"transformer.visual.transformer.resblocks.35.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
632 |
+
"transformer.visual.transformer.resblocks.35.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
633 |
+
"transformer.visual.transformer.resblocks.35.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
634 |
+
"transformer.visual.transformer.resblocks.35.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
635 |
+
"transformer.visual.transformer.resblocks.35.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
636 |
+
"transformer.visual.transformer.resblocks.35.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
637 |
+
"transformer.visual.transformer.resblocks.35.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
638 |
+
"transformer.visual.transformer.resblocks.35.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
639 |
+
"transformer.visual.transformer.resblocks.35.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
640 |
+
"transformer.visual.transformer.resblocks.35.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
641 |
+
"transformer.visual.transformer.resblocks.35.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
642 |
+
"transformer.visual.transformer.resblocks.36.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
643 |
+
"transformer.visual.transformer.resblocks.36.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
644 |
+
"transformer.visual.transformer.resblocks.36.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
645 |
+
"transformer.visual.transformer.resblocks.36.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
646 |
+
"transformer.visual.transformer.resblocks.36.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
647 |
+
"transformer.visual.transformer.resblocks.36.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
648 |
+
"transformer.visual.transformer.resblocks.36.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
649 |
+
"transformer.visual.transformer.resblocks.36.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
650 |
+
"transformer.visual.transformer.resblocks.36.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
651 |
+
"transformer.visual.transformer.resblocks.36.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
652 |
+
"transformer.visual.transformer.resblocks.36.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
653 |
+
"transformer.visual.transformer.resblocks.36.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
654 |
+
"transformer.visual.transformer.resblocks.37.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
655 |
+
"transformer.visual.transformer.resblocks.37.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
656 |
+
"transformer.visual.transformer.resblocks.37.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
657 |
+
"transformer.visual.transformer.resblocks.37.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
658 |
+
"transformer.visual.transformer.resblocks.37.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
659 |
+
"transformer.visual.transformer.resblocks.37.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
660 |
+
"transformer.visual.transformer.resblocks.37.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
661 |
+
"transformer.visual.transformer.resblocks.37.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
662 |
+
"transformer.visual.transformer.resblocks.37.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
663 |
+
"transformer.visual.transformer.resblocks.37.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
664 |
+
"transformer.visual.transformer.resblocks.37.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
665 |
+
"transformer.visual.transformer.resblocks.37.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
666 |
+
"transformer.visual.transformer.resblocks.38.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
667 |
+
"transformer.visual.transformer.resblocks.38.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
668 |
+
"transformer.visual.transformer.resblocks.38.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
669 |
+
"transformer.visual.transformer.resblocks.38.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
670 |
+
"transformer.visual.transformer.resblocks.38.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
671 |
+
"transformer.visual.transformer.resblocks.38.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
672 |
+
"transformer.visual.transformer.resblocks.38.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
673 |
+
"transformer.visual.transformer.resblocks.38.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
674 |
+
"transformer.visual.transformer.resblocks.38.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
675 |
+
"transformer.visual.transformer.resblocks.38.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
676 |
+
"transformer.visual.transformer.resblocks.38.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
677 |
+
"transformer.visual.transformer.resblocks.38.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
678 |
+
"transformer.visual.transformer.resblocks.39.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
679 |
+
"transformer.visual.transformer.resblocks.39.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
680 |
+
"transformer.visual.transformer.resblocks.39.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
681 |
+
"transformer.visual.transformer.resblocks.39.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
682 |
+
"transformer.visual.transformer.resblocks.39.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
683 |
+
"transformer.visual.transformer.resblocks.39.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
684 |
+
"transformer.visual.transformer.resblocks.39.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
685 |
+
"transformer.visual.transformer.resblocks.39.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
686 |
+
"transformer.visual.transformer.resblocks.39.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
687 |
+
"transformer.visual.transformer.resblocks.39.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
688 |
+
"transformer.visual.transformer.resblocks.39.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
689 |
+
"transformer.visual.transformer.resblocks.39.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
690 |
+
"transformer.visual.transformer.resblocks.4.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
|
691 |
+
"transformer.visual.transformer.resblocks.4.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
|
692 |
+
"transformer.visual.transformer.resblocks.4.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
|
693 |
+
"transformer.visual.transformer.resblocks.4.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
|
694 |
+
"transformer.visual.transformer.resblocks.4.ln_1.bias": "pytorch_model-00008-of-00010.bin",
|
695 |
+
"transformer.visual.transformer.resblocks.4.ln_1.weight": "pytorch_model-00008-of-00010.bin",
|
696 |
+
"transformer.visual.transformer.resblocks.4.ln_2.bias": "pytorch_model-00008-of-00010.bin",
|
697 |
+
"transformer.visual.transformer.resblocks.4.ln_2.weight": "pytorch_model-00008-of-00010.bin",
|
698 |
+
"transformer.visual.transformer.resblocks.4.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
|
699 |
+
"transformer.visual.transformer.resblocks.4.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
|
700 |
+
"transformer.visual.transformer.resblocks.4.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
|
701 |
+
"transformer.visual.transformer.resblocks.4.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
|
702 |
+
"transformer.visual.transformer.resblocks.40.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
703 |
+
"transformer.visual.transformer.resblocks.40.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
704 |
+
"transformer.visual.transformer.resblocks.40.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
705 |
+
"transformer.visual.transformer.resblocks.40.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
706 |
+
"transformer.visual.transformer.resblocks.40.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
707 |
+
"transformer.visual.transformer.resblocks.40.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
708 |
+
"transformer.visual.transformer.resblocks.40.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
709 |
+
"transformer.visual.transformer.resblocks.40.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
710 |
+
"transformer.visual.transformer.resblocks.40.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
711 |
+
"transformer.visual.transformer.resblocks.40.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
712 |
+
"transformer.visual.transformer.resblocks.40.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
713 |
+
"transformer.visual.transformer.resblocks.40.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
714 |
+
"transformer.visual.transformer.resblocks.41.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
715 |
+
"transformer.visual.transformer.resblocks.41.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
716 |
+
"transformer.visual.transformer.resblocks.41.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
717 |
+
"transformer.visual.transformer.resblocks.41.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
718 |
+
"transformer.visual.transformer.resblocks.41.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
719 |
+
"transformer.visual.transformer.resblocks.41.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
720 |
+
"transformer.visual.transformer.resblocks.41.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
721 |
+
"transformer.visual.transformer.resblocks.41.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
722 |
+
"transformer.visual.transformer.resblocks.41.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
723 |
+
"transformer.visual.transformer.resblocks.41.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
724 |
+
"transformer.visual.transformer.resblocks.41.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
725 |
+
"transformer.visual.transformer.resblocks.41.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
726 |
+
"transformer.visual.transformer.resblocks.42.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
727 |
+
"transformer.visual.transformer.resblocks.42.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
728 |
+
"transformer.visual.transformer.resblocks.42.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
729 |
+
"transformer.visual.transformer.resblocks.42.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
730 |
+
"transformer.visual.transformer.resblocks.42.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
731 |
+
"transformer.visual.transformer.resblocks.42.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
732 |
+
"transformer.visual.transformer.resblocks.42.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
733 |
+
"transformer.visual.transformer.resblocks.42.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
734 |
+
"transformer.visual.transformer.resblocks.42.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
735 |
+
"transformer.visual.transformer.resblocks.42.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
736 |
+
"transformer.visual.transformer.resblocks.42.mlp.c_proj.bias": "pytorch_model-00009-of-00010.bin",
|
737 |
+
"transformer.visual.transformer.resblocks.42.mlp.c_proj.weight": "pytorch_model-00009-of-00010.bin",
|
738 |
+
"transformer.visual.transformer.resblocks.43.attn.in_proj.bias": "pytorch_model-00009-of-00010.bin",
|
739 |
+
"transformer.visual.transformer.resblocks.43.attn.in_proj.weight": "pytorch_model-00009-of-00010.bin",
|
740 |
+
"transformer.visual.transformer.resblocks.43.attn.out_proj.bias": "pytorch_model-00009-of-00010.bin",
|
741 |
+
"transformer.visual.transformer.resblocks.43.attn.out_proj.weight": "pytorch_model-00009-of-00010.bin",
|
742 |
+
"transformer.visual.transformer.resblocks.43.ln_1.bias": "pytorch_model-00009-of-00010.bin",
|
743 |
+
"transformer.visual.transformer.resblocks.43.ln_1.weight": "pytorch_model-00009-of-00010.bin",
|
744 |
+
"transformer.visual.transformer.resblocks.43.ln_2.bias": "pytorch_model-00009-of-00010.bin",
|
745 |
+
"transformer.visual.transformer.resblocks.43.ln_2.weight": "pytorch_model-00009-of-00010.bin",
|
746 |
+
"transformer.visual.transformer.resblocks.43.mlp.c_fc.bias": "pytorch_model-00009-of-00010.bin",
|
747 |
+
"transformer.visual.transformer.resblocks.43.mlp.c_fc.weight": "pytorch_model-00009-of-00010.bin",
|
748 |
+
"transformer.visual.transformer.resblocks.43.mlp.c_proj.bias": "pytorch_model-00010-of-00010.bin",
|
749 |
+
"transformer.visual.transformer.resblocks.43.mlp.c_proj.weight": "pytorch_model-00010-of-00010.bin",
|
750 |
+
"transformer.visual.transformer.resblocks.44.attn.in_proj.bias": "pytorch_model-00010-of-00010.bin",
|
751 |
+
"transformer.visual.transformer.resblocks.44.attn.in_proj.weight": "pytorch_model-00010-of-00010.bin",
|
752 |
+
"transformer.visual.transformer.resblocks.44.attn.out_proj.bias": "pytorch_model-00010-of-00010.bin",
|
753 |
+
"transformer.visual.transformer.resblocks.44.attn.out_proj.weight": "pytorch_model-00010-of-00010.bin",
|
754 |
+
"transformer.visual.transformer.resblocks.44.ln_1.bias": "pytorch_model-00010-of-00010.bin",
|
755 |
+
"transformer.visual.transformer.resblocks.44.ln_1.weight": "pytorch_model-00010-of-00010.bin",
|
756 |
+
"transformer.visual.transformer.resblocks.44.ln_2.bias": "pytorch_model-00010-of-00010.bin",
|
757 |
+
"transformer.visual.transformer.resblocks.44.ln_2.weight": "pytorch_model-00010-of-00010.bin",
|
758 |
+
"transformer.visual.transformer.resblocks.44.mlp.c_fc.bias": "pytorch_model-00010-of-00010.bin",
|
759 |
+
"transformer.visual.transformer.resblocks.44.mlp.c_fc.weight": "pytorch_model-00010-of-00010.bin",
|
760 |
+
"transformer.visual.transformer.resblocks.44.mlp.c_proj.bias": "pytorch_model-00010-of-00010.bin",
|
761 |
+
"transformer.visual.transformer.resblocks.44.mlp.c_proj.weight": "pytorch_model-00010-of-00010.bin",
|
762 |
+
"transformer.visual.transformer.resblocks.45.attn.in_proj.bias": "pytorch_model-00010-of-00010.bin",
|
763 |
+
"transformer.visual.transformer.resblocks.45.attn.in_proj.weight": "pytorch_model-00010-of-00010.bin",
|
764 |
+
"transformer.visual.transformer.resblocks.45.attn.out_proj.bias": "pytorch_model-00010-of-00010.bin",
|
765 |
+
"transformer.visual.transformer.resblocks.45.attn.out_proj.weight": "pytorch_model-00010-of-00010.bin",
|
766 |
+
"transformer.visual.transformer.resblocks.45.ln_1.bias": "pytorch_model-00010-of-00010.bin",
|
767 |
+
"transformer.visual.transformer.resblocks.45.ln_1.weight": "pytorch_model-00010-of-00010.bin",
|
768 |
+
"transformer.visual.transformer.resblocks.45.ln_2.bias": "pytorch_model-00010-of-00010.bin",
|
769 |
+
"transformer.visual.transformer.resblocks.45.ln_2.weight": "pytorch_model-00010-of-00010.bin",
|
770 |
+
"transformer.visual.transformer.resblocks.45.mlp.c_fc.bias": "pytorch_model-00010-of-00010.bin",
|
771 |
+
"transformer.visual.transformer.resblocks.45.mlp.c_fc.weight": "pytorch_model-00010-of-00010.bin",
|
772 |
+
"transformer.visual.transformer.resblocks.45.mlp.c_proj.bias": "pytorch_model-00010-of-00010.bin",
|
773 |
+
"transformer.visual.transformer.resblocks.45.mlp.c_proj.weight": "pytorch_model-00010-of-00010.bin",
|
774 |
+
"transformer.visual.transformer.resblocks.46.attn.in_proj.bias": "pytorch_model-00010-of-00010.bin",
|
775 |
+
"transformer.visual.transformer.resblocks.46.attn.in_proj.weight": "pytorch_model-00010-of-00010.bin",
|
776 |
+
"transformer.visual.transformer.resblocks.46.attn.out_proj.bias": "pytorch_model-00010-of-00010.bin",
|
777 |
+
"transformer.visual.transformer.resblocks.46.attn.out_proj.weight": "pytorch_model-00010-of-00010.bin",
|
778 |
+
"transformer.visual.transformer.resblocks.46.ln_1.bias": "pytorch_model-00010-of-00010.bin",
|
779 |
+
"transformer.visual.transformer.resblocks.46.ln_1.weight": "pytorch_model-00010-of-00010.bin",
|
780 |
+
"transformer.visual.transformer.resblocks.46.ln_2.bias": "pytorch_model-00010-of-00010.bin",
|
781 |
+
"transformer.visual.transformer.resblocks.46.ln_2.weight": "pytorch_model-00010-of-00010.bin",
|
782 |
+
"transformer.visual.transformer.resblocks.46.mlp.c_fc.bias": "pytorch_model-00010-of-00010.bin",
|
783 |
+
"transformer.visual.transformer.resblocks.46.mlp.c_fc.weight": "pytorch_model-00010-of-00010.bin",
|
784 |
+
"transformer.visual.transformer.resblocks.46.mlp.c_proj.bias": "pytorch_model-00010-of-00010.bin",
|
785 |
+
"transformer.visual.transformer.resblocks.46.mlp.c_proj.weight": "pytorch_model-00010-of-00010.bin",
|
786 |
+
"transformer.visual.transformer.resblocks.47.attn.in_proj.bias": "pytorch_model-00010-of-00010.bin",
|
787 |
+
"transformer.visual.transformer.resblocks.47.attn.in_proj.weight": "pytorch_model-00010-of-00010.bin",
|
788 |
+
"transformer.visual.transformer.resblocks.47.attn.out_proj.bias": "pytorch_model-00010-of-00010.bin",
|
789 |
+
"transformer.visual.transformer.resblocks.47.attn.out_proj.weight": "pytorch_model-00010-of-00010.bin",
|
790 |
+
"transformer.visual.transformer.resblocks.47.ln_1.bias": "pytorch_model-00010-of-00010.bin",
|
791 |
+
"transformer.visual.transformer.resblocks.47.ln_1.weight": "pytorch_model-00010-of-00010.bin",
|
792 |
+
"transformer.visual.transformer.resblocks.47.ln_2.bias": "pytorch_model-00010-of-00010.bin",
|
793 |
+
"transformer.visual.transformer.resblocks.47.ln_2.weight": "pytorch_model-00010-of-00010.bin",
|
794 |
+
"transformer.visual.transformer.resblocks.47.mlp.c_fc.bias": "pytorch_model-00010-of-00010.bin",
|
795 |
+
"transformer.visual.transformer.resblocks.47.mlp.c_fc.weight": "pytorch_model-00010-of-00010.bin",
|
796 |
+
"transformer.visual.transformer.resblocks.47.mlp.c_proj.bias": "pytorch_model-00010-of-00010.bin",
|
797 |
+
"transformer.visual.transformer.resblocks.47.mlp.c_proj.weight": "pytorch_model-00010-of-00010.bin",
|
798 |
+
"transformer.visual.transformer.resblocks.5.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
|
799 |
+
"transformer.visual.transformer.resblocks.5.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
|
800 |
+
"transformer.visual.transformer.resblocks.5.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
|
801 |
+
"transformer.visual.transformer.resblocks.5.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
|
802 |
+
"transformer.visual.transformer.resblocks.5.ln_1.bias": "pytorch_model-00008-of-00010.bin",
|
803 |
+
"transformer.visual.transformer.resblocks.5.ln_1.weight": "pytorch_model-00008-of-00010.bin",
|
804 |
+
"transformer.visual.transformer.resblocks.5.ln_2.bias": "pytorch_model-00008-of-00010.bin",
|
805 |
+
"transformer.visual.transformer.resblocks.5.ln_2.weight": "pytorch_model-00008-of-00010.bin",
|
806 |
+
"transformer.visual.transformer.resblocks.5.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
|
807 |
+
"transformer.visual.transformer.resblocks.5.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
|
808 |
+
"transformer.visual.transformer.resblocks.5.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
|
809 |
+
"transformer.visual.transformer.resblocks.5.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
|
810 |
+
"transformer.visual.transformer.resblocks.6.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
|
811 |
+
"transformer.visual.transformer.resblocks.6.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
|
812 |
+
"transformer.visual.transformer.resblocks.6.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
|
813 |
+
"transformer.visual.transformer.resblocks.6.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
|
814 |
+
"transformer.visual.transformer.resblocks.6.ln_1.bias": "pytorch_model-00008-of-00010.bin",
|
815 |
+
"transformer.visual.transformer.resblocks.6.ln_1.weight": "pytorch_model-00008-of-00010.bin",
|
816 |
+
"transformer.visual.transformer.resblocks.6.ln_2.bias": "pytorch_model-00008-of-00010.bin",
|
817 |
+
"transformer.visual.transformer.resblocks.6.ln_2.weight": "pytorch_model-00008-of-00010.bin",
|
818 |
+
"transformer.visual.transformer.resblocks.6.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
|
819 |
+
"transformer.visual.transformer.resblocks.6.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
|
820 |
+
"transformer.visual.transformer.resblocks.6.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
|
821 |
+
"transformer.visual.transformer.resblocks.6.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
|
822 |
+
"transformer.visual.transformer.resblocks.7.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
|
823 |
+
"transformer.visual.transformer.resblocks.7.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
|
824 |
+
"transformer.visual.transformer.resblocks.7.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
|
825 |
+
"transformer.visual.transformer.resblocks.7.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
|
826 |
+
"transformer.visual.transformer.resblocks.7.ln_1.bias": "pytorch_model-00008-of-00010.bin",
|
827 |
+
"transformer.visual.transformer.resblocks.7.ln_1.weight": "pytorch_model-00008-of-00010.bin",
|
828 |
+
"transformer.visual.transformer.resblocks.7.ln_2.bias": "pytorch_model-00008-of-00010.bin",
|
829 |
+
"transformer.visual.transformer.resblocks.7.ln_2.weight": "pytorch_model-00008-of-00010.bin",
|
830 |
+
"transformer.visual.transformer.resblocks.7.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
|
831 |
+
"transformer.visual.transformer.resblocks.7.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
|
832 |
+
"transformer.visual.transformer.resblocks.7.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
|
833 |
+
"transformer.visual.transformer.resblocks.7.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
|
834 |
+
"transformer.visual.transformer.resblocks.8.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
|
835 |
+
"transformer.visual.transformer.resblocks.8.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
|
836 |
+
"transformer.visual.transformer.resblocks.8.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
|
837 |
+
"transformer.visual.transformer.resblocks.8.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
|
838 |
+
"transformer.visual.transformer.resblocks.8.ln_1.bias": "pytorch_model-00008-of-00010.bin",
|
839 |
+
"transformer.visual.transformer.resblocks.8.ln_1.weight": "pytorch_model-00008-of-00010.bin",
|
840 |
+
"transformer.visual.transformer.resblocks.8.ln_2.bias": "pytorch_model-00008-of-00010.bin",
|
841 |
+
"transformer.visual.transformer.resblocks.8.ln_2.weight": "pytorch_model-00008-of-00010.bin",
|
842 |
+
"transformer.visual.transformer.resblocks.8.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
|
843 |
+
"transformer.visual.transformer.resblocks.8.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
|
844 |
+
"transformer.visual.transformer.resblocks.8.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
|
845 |
+
"transformer.visual.transformer.resblocks.8.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
|
846 |
+
"transformer.visual.transformer.resblocks.9.attn.in_proj.bias": "pytorch_model-00008-of-00010.bin",
|
847 |
+
"transformer.visual.transformer.resblocks.9.attn.in_proj.weight": "pytorch_model-00008-of-00010.bin",
|
848 |
+
"transformer.visual.transformer.resblocks.9.attn.out_proj.bias": "pytorch_model-00008-of-00010.bin",
|
849 |
+
"transformer.visual.transformer.resblocks.9.attn.out_proj.weight": "pytorch_model-00008-of-00010.bin",
|
850 |
+
"transformer.visual.transformer.resblocks.9.ln_1.bias": "pytorch_model-00008-of-00010.bin",
|
851 |
+
"transformer.visual.transformer.resblocks.9.ln_1.weight": "pytorch_model-00008-of-00010.bin",
|
852 |
+
"transformer.visual.transformer.resblocks.9.ln_2.bias": "pytorch_model-00008-of-00010.bin",
|
853 |
+
"transformer.visual.transformer.resblocks.9.ln_2.weight": "pytorch_model-00008-of-00010.bin",
|
854 |
+
"transformer.visual.transformer.resblocks.9.mlp.c_fc.bias": "pytorch_model-00008-of-00010.bin",
|
855 |
+
"transformer.visual.transformer.resblocks.9.mlp.c_fc.weight": "pytorch_model-00008-of-00010.bin",
|
856 |
+
"transformer.visual.transformer.resblocks.9.mlp.c_proj.bias": "pytorch_model-00008-of-00010.bin",
|
857 |
+
"transformer.visual.transformer.resblocks.9.mlp.c_proj.weight": "pytorch_model-00008-of-00010.bin",
|
858 |
+
"transformer.wte.weight": "pytorch_model-00001-of-00010.bin"
|
859 |
+
}
|
860 |
+
}
|
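The entries above complete the weight_map of Qwen-VL-Chat/pytorch_model.bin.index.json: every parameter name is mapped to the shard file that stores it, with the visual resblocks spread over pytorch_model-00008/00009/00010 and the token embedding (transformer.wte.weight) in shard 00001. As a minimal sketch of how such an index is typically consumed, the helper below pulls a single tensor without loading all ten shards; the function name load_single_tensor is ours for illustration only, and it assumes the standard Hugging Face sharded-checkpoint layout (a "weight_map" key and torch-serialized shard files).

import json
import os
import torch

def load_single_tensor(checkpoint_dir: str, tensor_name: str) -> torch.Tensor:
    # The index maps each parameter name to the shard file that contains it.
    with open(os.path.join(checkpoint_dir, "pytorch_model.bin.index.json")) as f:
        index = json.load(f)
    shard_file = index["weight_map"][tensor_name]  # e.g. "pytorch_model-00009-of-00010.bin"
    # Load only that shard (a plain state dict) and pick out the requested tensor.
    shard = torch.load(os.path.join(checkpoint_dir, shard_file), map_location="cpu")
    return shard[tensor_name]

# Hypothetical usage: one ViT block's LayerNorm weight, served from shard 00009.
# w = load_single_tensor("Qwen-VL-Chat", "transformer.visual.transformer.resblocks.18.ln_1.weight")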
Qwen-VL-Chat/qwen.tiktoken
ADDED
The diff for this file is too large to render.
See raw diff
|
|
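qwen.tiktoken is the tokenizer's BPE rank table and is only available through the raw diff. A hedged sketch of a reader for it follows, assuming the common tiktoken rank-file format of one base64-encoded token and its integer rank per line (an assumption, since the file is not rendered here); read_tiktoken_ranks is an illustrative name, not a function from this repository.

import base64

def read_tiktoken_ranks(vocab_path: str) -> dict:
    # Assumed line format: "<base64 token bytes> <rank>".
    ranks = {}
    with open(vocab_path, "rb") as f:
        for line in f:
            if not line.strip():
                continue
            token_b64, rank = line.split()
            ranks[base64.b64decode(token_b64)] = int(rank)
    return ranks

# mergeable_ranks = read_tiktoken_ranks("Qwen-VL-Chat/qwen.tiktoken")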
Qwen-VL-Chat/qwen_generation_utils.py
ADDED
@@ -0,0 +1,420 @@
|
1 |
+
# Copyright (c) Alibaba Cloud.
|
2 |
+
#
|
3 |
+
# This source code is licensed under the license found in the
|
4 |
+
# LICENSE file in the root directory of this source tree.
|
5 |
+
|
6 |
+
"""Generation support."""
|
7 |
+
|
8 |
+
from typing import Tuple, List, Union, Iterable
|
9 |
+
|
10 |
+
import numpy as np
|
11 |
+
import torch
|
12 |
+
import torch.nn.functional as F
|
13 |
+
from transformers import PreTrainedTokenizer
|
14 |
+
from transformers import logging
|
15 |
+
from transformers.generation import LogitsProcessor
|
16 |
+
|
17 |
+
logger = logging.get_logger(__name__)
|
18 |
+
|
19 |
+
# Types.
|
20 |
+
HistoryType = List[Tuple[str, str]]
|
21 |
+
TokensType = List[int]
|
22 |
+
BatchTokensType = List[List[int]]
|
23 |
+
|
24 |
+
|
25 |
+
def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:
|
26 |
+
for tokens in batch:
|
27 |
+
context_length = len(tokens)
|
28 |
+
if context_length < seq_length:
|
29 |
+
tokens.extend([pad_id] * (seq_length - context_length))
|
30 |
+
return batch
|
31 |
+
|
32 |
+
|
33 |
+
def get_ltor_masks_and_position_ids(
|
34 |
+
data,
|
35 |
+
eod_token,
|
36 |
+
reset_position_ids,
|
37 |
+
reset_attention_mask,
|
38 |
+
eod_mask_loss,
|
39 |
+
):
|
40 |
+
"""Build masks and position id for left to right model."""
|
41 |
+
|
42 |
+
# Extract batch size and sequence length.
|
43 |
+
micro_batch_size, seq_length = data.size()
|
44 |
+
|
45 |
+
# Attention mask (lower triangular).
|
46 |
+
if reset_attention_mask:
|
47 |
+
att_mask_batch = micro_batch_size
|
48 |
+
else:
|
49 |
+
att_mask_batch = 1
|
50 |
+
attention_mask = torch.tril(
|
51 |
+
torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
|
52 |
+
).view(att_mask_batch, 1, seq_length, seq_length)
|
53 |
+
|
54 |
+
# Loss mask.
|
55 |
+
loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
|
56 |
+
if eod_mask_loss:
|
57 |
+
loss_mask[data == eod_token] = 0.0
|
58 |
+
|
59 |
+
# Position ids.
|
60 |
+
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
|
61 |
+
position_ids = position_ids.unsqueeze(0).expand_as(data)
|
62 |
+
# We need to clone as the ids will be modified based on batch index.
|
63 |
+
if reset_position_ids:
|
64 |
+
position_ids = position_ids.clone()
|
65 |
+
|
66 |
+
if reset_position_ids or reset_attention_mask:
|
67 |
+
# Loop through the batches:
|
68 |
+
for b in range(micro_batch_size):
|
69 |
+
|
70 |
+
# Find indices where the EOD token is.
|
71 |
+
eod_index = position_ids[b, data[b] == eod_token]
|
72 |
+
# Detach indices from positions if going to modify positions.
|
73 |
+
if reset_position_ids:
|
74 |
+
eod_index = eod_index.clone()
|
75 |
+
|
76 |
+
# Loop through EOD indices:
|
77 |
+
prev_index = 0
|
78 |
+
for j in range(eod_index.size()[0]):
|
79 |
+
i = eod_index[j]
|
80 |
+
# Mask attention across the EOD boundary.
|
81 |
+
if reset_attention_mask:
|
82 |
+
attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
|
83 |
+
# Reset positions.
|
84 |
+
if reset_position_ids:
|
85 |
+
position_ids[b, (i + 1) :] -= i + 1 - prev_index
|
86 |
+
prev_index = i + 1
|
87 |
+
|
88 |
+
# Convert attention mask to binary:
|
89 |
+
attention_mask = attention_mask < 0.5
|
90 |
+
|
91 |
+
return attention_mask, loss_mask, position_ids
|
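# A hedged shape sketch for get_ltor_masks_and_position_ids above (not part of this
# module; toy tensor, with 0 standing in for the EOD id purely for illustration):
#
#     data = torch.tensor([[5, 6, 7, 0, 8, 9]])            # (batch=1, seq_len=6)
#     attn, loss_mask, pos = get_ltor_masks_and_position_ids(
#         data, eod_token=0, reset_position_ids=False,
#         reset_attention_mask=False, eod_mask_loss=False)
#     # attn: (1, 1, 6, 6) bool, True above the diagonal (future positions are blocked)
#     # loss_mask: (1, 6) of ones; pos: (1, 6) position ids 0..5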
92 |
+
|
93 |
+
|
94 |
+
def get_batch(context_tokens: torch.LongTensor, eod_id: int):
|
95 |
+
"""Generate batch from context tokens."""
|
96 |
+
# Keep the context tokens contiguous on their current device.
|
97 |
+
tokens = context_tokens.contiguous().to(context_tokens.device)
|
98 |
+
# Get the attention mask and position ids.
|
99 |
+
attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
|
100 |
+
tokens,
|
101 |
+
eod_id,
|
102 |
+
reset_position_ids=False,
|
103 |
+
reset_attention_mask=False,
|
104 |
+
eod_mask_loss=False,
|
105 |
+
)
|
106 |
+
return tokens, attention_mask, position_ids
|
107 |
+
|
108 |
+
|
109 |
+
def get_stop_words_ids(chat_format, tokenizer):
|
110 |
+
if chat_format == "raw":
|
111 |
+
stop_words_ids = [tokenizer.encode("Human:"), [tokenizer.eod_id]]
|
112 |
+
elif chat_format == "chatml":
|
113 |
+
stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
|
114 |
+
else:
|
115 |
+
raise NotImplementedError(f"Unknown chat format {chat_format!r}")
|
116 |
+
return stop_words_ids
|
117 |
+
|
118 |
+
|
119 |
+
def make_context(
|
120 |
+
tokenizer: PreTrainedTokenizer,
|
121 |
+
query: str,
|
122 |
+
history: List[Tuple[str, str]] = None,
|
123 |
+
system: str = "",
|
124 |
+
max_window_size: int = 6144,
|
125 |
+
chat_format: str = "chatml",
|
126 |
+
):
|
127 |
+
if history is None:
|
128 |
+
history = []
|
129 |
+
|
130 |
+
if chat_format == "chatml":
|
131 |
+
im_start, im_end = "<|im_start|>", "<|im_end|>"
|
132 |
+
im_start_tokens = [tokenizer.im_start_id]
|
133 |
+
im_end_tokens = [tokenizer.im_end_id]
|
134 |
+
nl_tokens = tokenizer.encode("\n")
|
135 |
+
|
136 |
+
def _tokenize_str(role, content):
|
137 |
+
return f"{role}\n{content}", tokenizer.encode(
|
138 |
+
role, allowed_special=set(tokenizer.IMAGE_ST)
|
139 |
+
) + nl_tokens + tokenizer.encode(content, allowed_special=set(tokenizer.IMAGE_ST))
|
140 |
+
|
141 |
+
system_text, system_tokens_part = _tokenize_str("system", system)
|
142 |
+
system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
|
143 |
+
|
144 |
+
raw_text = ""
|
145 |
+
context_tokens = []
|
146 |
+
|
147 |
+
for turn_query, turn_response in reversed(history):
|
148 |
+
query_text, query_tokens_part = _tokenize_str("user", turn_query)
|
149 |
+
query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
|
150 |
+
if turn_response is not None:
|
151 |
+
response_text, response_tokens_part = _tokenize_str(
|
152 |
+
"assistant", turn_response
|
153 |
+
)
|
154 |
+
response_tokens = im_start_tokens + response_tokens_part + im_end_tokens
|
155 |
+
|
156 |
+
next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
|
157 |
+
prev_chat = (
|
158 |
+
f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
|
159 |
+
)
|
160 |
+
else:
|
161 |
+
next_context_tokens = nl_tokens + query_tokens + nl_tokens
|
162 |
+
prev_chat = f"\n{im_start}{query_text}{im_end}\n"
|
163 |
+
|
164 |
+
current_context_size = (
|
165 |
+
len(system_tokens) + len(next_context_tokens) + len(context_tokens)
|
166 |
+
)
|
167 |
+
if current_context_size < max_window_size:
|
168 |
+
context_tokens = next_context_tokens + context_tokens
|
169 |
+
raw_text = prev_chat + raw_text
|
170 |
+
else:
|
171 |
+
break
|
172 |
+
|
173 |
+
context_tokens = system_tokens + context_tokens
|
174 |
+
raw_text = f"{im_start}{system_text}{im_end}" + raw_text
|
175 |
+
context_tokens += (
|
176 |
+
nl_tokens
|
177 |
+
+ im_start_tokens
|
178 |
+
+ _tokenize_str("user", query)[1]
|
179 |
+
+ im_end_tokens
|
180 |
+
+ nl_tokens
|
181 |
+
+ im_start_tokens
|
182 |
+
+ tokenizer.encode("assistant")
|
183 |
+
+ nl_tokens
|
184 |
+
)
|
185 |
+
raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
|
186 |
+
|
187 |
+
elif chat_format == "raw":
|
188 |
+
raw_text = query
|
189 |
+
context_tokens = tokenizer.encode(raw_text)
|
190 |
+
else:
|
191 |
+
raise NotImplementedError(f"Unknown chat format {chat_format!r}")
|
192 |
+
|
193 |
+
return raw_text, context_tokens
|
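# A hedged usage sketch for make_context above (not part of this module; `tokenizer`
# is assumed to be an already-loaded Qwen-VL tokenizer):
#
#     raw_text, context_tokens = make_context(
#         tokenizer,
#         query="Describe this picture.",
#         history=[("Hello", "Hi, how can I help you?")],
#         system="You are a helpful assistant.",
#         chat_format="chatml")
#     # raw_text is the ChatML prompt, starting with "<|im_start|>system\n...";
#     # context_tokens is the matching token-id list fed to generation together
#     # with the stop words from get_stop_words_ids("chatml", tokenizer).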
194 |
+
|
195 |
+
|
196 |
+
def _decode_default(
|
197 |
+
tokens: List[int],
|
198 |
+
*,
|
199 |
+
stop_words: List[str],
|
200 |
+
eod_words: List[str],
|
201 |
+
tokenizer: PreTrainedTokenizer,
|
202 |
+
raw_text_len: int,
|
203 |
+
verbose: bool = False,
|
204 |
+
return_end_reason: bool = False,
|
205 |
+
errors: str='replace',
|
206 |
+
):
|
207 |
+
trim_decode_tokens = tokenizer.decode(tokens, errors=errors)[raw_text_len:]
|
208 |
+
if verbose:
|
209 |
+
print("\nRaw Generate: ", trim_decode_tokens)
|
210 |
+
|
211 |
+
end_reason = f"Gen length {len(tokens)}"
|
212 |
+
for stop_word in stop_words:
|
213 |
+
trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
|
214 |
+
for eod_word in eod_words:
|
215 |
+
if eod_word in trim_decode_tokens:
|
216 |
+
end_reason = f"Gen {eod_word!r}"
|
217 |
+
trim_decode_tokens = trim_decode_tokens.split(eod_word)[0]
|
218 |
+
trim_decode_tokens = trim_decode_tokens.strip()
|
219 |
+
if verbose:
|
220 |
+
print("\nEnd Reason:", end_reason)
|
221 |
+
print("\nGenerate: ", trim_decode_tokens)
|
222 |
+
|
223 |
+
if return_end_reason:
|
224 |
+
return trim_decode_tokens, end_reason
|
225 |
+
else:
|
226 |
+
return trim_decode_tokens
|
227 |
+
|
228 |
+
|
229 |
+
def _decode_chatml(
|
230 |
+
tokens: List[int],
|
231 |
+
*,
|
232 |
+
stop_words: List[str],
|
233 |
+
eod_token_ids: List[int],
|
234 |
+
tokenizer: PreTrainedTokenizer,
|
235 |
+
raw_text_len: int,
|
236 |
+
context_length: int,
|
237 |
+
verbose: bool = False,
|
238 |
+
return_end_reason: bool = False,
|
239 |
+
errors: str='replace'
|
240 |
+
):
|
241 |
+
end_reason = f"Gen length {len(tokens)}"
|
242 |
+
eod_token_idx = context_length
|
243 |
+
for eod_token_idx in range(context_length, len(tokens)):
|
244 |
+
if tokens[eod_token_idx] in eod_token_ids:
|
245 |
+
end_reason = f"Gen {tokenizer.decode([tokens[eod_token_idx]])!r}"
|
246 |
+
break
|
247 |
+
|
248 |
+
trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx], errors=errors)[raw_text_len:]
|
249 |
+
if verbose:
|
250 |
+
print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens, errors=errors)[raw_text_len:])
|
251 |
+
print("\nRaw Generate:", trim_decode_tokens)
|
252 |
+
print("\nEnd Reason:", end_reason)
|
253 |
+
for stop_word in stop_words:
|
254 |
+
trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
|
255 |
+
trim_decode_tokens = trim_decode_tokens.strip()
|
256 |
+
if verbose:
|
257 |
+
print("\nGenerate:", trim_decode_tokens)
|
258 |
+
|
259 |
+
if return_end_reason:
|
260 |
+
return trim_decode_tokens, end_reason
|
261 |
+
else:
|
262 |
+
return trim_decode_tokens
|
263 |
+
|
264 |
+
|
265 |
+
def decode_tokens(
|
266 |
+
tokens: Union[torch.LongTensor, TokensType],
|
267 |
+
tokenizer: PreTrainedTokenizer,
|
268 |
+
raw_text_len: int,
|
269 |
+
context_length: int,
|
270 |
+
chat_format: str,
|
271 |
+
verbose: bool = False,
|
272 |
+
return_end_reason: bool = False,
|
273 |
+
errors: str="replace",
|
274 |
+
) -> str:
|
275 |
+
if torch.is_tensor(tokens):
|
276 |
+
tokens = tokens.cpu().numpy().tolist()
|
277 |
+
|
278 |
+
if chat_format == "chatml":
|
279 |
+
return _decode_chatml(
|
280 |
+
tokens,
|
281 |
+
stop_words=[],
|
282 |
+
eod_token_ids=[tokenizer.im_start_id, tokenizer.im_end_id],
|
283 |
+
tokenizer=tokenizer,
|
284 |
+
raw_text_len=raw_text_len,
|
285 |
+
context_length=context_length,
|
286 |
+
verbose=verbose,
|
287 |
+
return_end_reason=return_end_reason,
|
288 |
+
errors=errors,
|
289 |
+
)
|
290 |
+
elif chat_format == "raw":
|
291 |
+
return _decode_default(
|
292 |
+
tokens,
|
293 |
+
stop_words=["<|endoftext|>"],
|
294 |
+
eod_words=["<|endoftext|>"],
|
295 |
+
tokenizer=tokenizer,
|
296 |
+
raw_text_len=raw_text_len,
|
297 |
+
verbose=verbose,
|
298 |
+
return_end_reason=return_end_reason,
|
299 |
+
errors=errors,
|
300 |
+
)
|
301 |
+
else:
|
302 |
+
raise NotImplementedError(f"Unknown chat format {chat_format!r}")
|
303 |
+
|
304 |
+
|
305 |
+
class StopWordsLogitsProcessor(LogitsProcessor):
|
306 |
+
"""
|
307 |
+
:class:`transformers.LogitsProcessor` that enforces that when specified sequences appear, stop geration.
|
308 |
+
|
309 |
+
Args:
|
310 |
+
stop_words_ids (:obj:`List[List[int]]`):
|
311 |
+
List of list of token ids of stop ids. In order to get the tokens of the words
|
312 |
+
that should not appear in the generated text, use :obj:`tokenizer(bad_word,
|
313 |
+
add_prefix_space=True).input_ids`.
|
314 |
+
eos_token_id (:obj:`int`):
|
315 |
+
The id of the `end-of-sequence` token.
|
316 |
+
"""
|
317 |
+
|
318 |
+
def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):
|
319 |
+
|
320 |
+
if not isinstance(stop_words_ids, List) or len(stop_words_ids) == 0:
|
321 |
+
raise ValueError(
|
322 |
+
f"`stop_words_ids` has to be a non-emtpy list, but is {stop_words_ids}."
|
323 |
+
)
|
324 |
+
if any(not isinstance(bad_word_ids, list) for bad_word_ids in stop_words_ids):
|
325 |
+
raise ValueError(
|
326 |
+
f"`stop_words_ids` has to be a list of lists, but is {stop_words_ids}."
|
327 |
+
)
|
328 |
+
if any(
|
329 |
+
any(
|
330 |
+
(not isinstance(token_id, (int, np.integer)) or token_id < 0)
|
331 |
+
for token_id in stop_word_ids
|
332 |
+
)
|
333 |
+
for stop_word_ids in stop_words_ids
|
334 |
+
):
|
335 |
+
raise ValueError(
|
336 |
+
f"Each list in `stop_words_ids` has to be a list of positive integers, but is {stop_words_ids}."
|
337 |
+
)
|
338 |
+
|
339 |
+
self.stop_words_ids = list(
|
340 |
+
filter(
|
341 |
+
lambda bad_token_seq: bad_token_seq != [eos_token_id], stop_words_ids
|
342 |
+
)
|
343 |
+
)
|
344 |
+
self.eos_token_id = eos_token_id
|
345 |
+
for stop_token_seq in self.stop_words_ids:
|
346 |
+
assert (
|
347 |
+
len(stop_token_seq) > 0
|
348 |
+
), "Stop words token sequences {} cannot have an empty list".format(
|
349 |
+
stop_words_ids
|
350 |
+
)
|
351 |
+
|
352 |
+
def __call__(
|
353 |
+
self, input_ids: torch.LongTensor, scores: torch.FloatTensor
|
354 |
+
) -> torch.FloatTensor:
|
355 |
+
stopped_samples = self._calc_stopped_samples(input_ids)
|
356 |
+
for i, should_stop in enumerate(stopped_samples):
|
357 |
+
if should_stop:
|
358 |
+
scores[i, self.eos_token_id] = float(2**15)
|
359 |
+
return scores
|
360 |
+
|
361 |
+
def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:
|
362 |
+
if len(tokens) == 0:
|
363 |
+
# if bad word tokens is just one token always ban it
|
364 |
+
return True
|
365 |
+
elif len(tokens) > len(prev_tokens):
|
366 |
+
# if bad word tokens are longer then prev input_ids they can't be equal
|
367 |
+
return False
|
368 |
+
elif prev_tokens[-len(tokens) :].tolist() == tokens:
|
369 |
+
# if tokens match
|
370 |
+
return True
|
371 |
+
else:
|
372 |
+
return False
|
373 |
+
|
374 |
+
def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:
|
375 |
+
stopped_samples = []
|
376 |
+
for prev_input_ids_slice in prev_input_ids:
|
377 |
+
match = False
|
378 |
+
for stop_token_seq in self.stop_words_ids:
|
379 |
+
if self._tokens_match(prev_input_ids_slice, stop_token_seq):
|
380 |
+
# if tokens do not match continue
|
381 |
+
match = True
|
382 |
+
break
|
383 |
+
stopped_samples.append(match)
|
384 |
+
|
385 |
+
return stopped_samples
|
386 |
+
|
387 |
+
|
388 |
+
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
|
389 |
+
"""This function has been mostly taken from huggingface conversational
|
390 |
+
ai code at
|
391 |
+
https://medium.com/huggingface/how-to-build-a-state-of-the-art-
|
392 |
+
conversational-ai-with-transfer-learning-2d818ac26313"""
|
393 |
+
|
394 |
+
if top_k > 0:
|
395 |
+
# Remove all tokens with a probability less than the
|
396 |
+
# last token of the top-k
|
397 |
+
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
|
398 |
+
logits[indices_to_remove] = filter_value
|
399 |
+
|
400 |
+
if top_p > 0.0:
|
401 |
+
# Cconvert to 1D
|
402 |
+
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
|
403 |
+
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
|
404 |
+
|
405 |
+
# Remove tokens with cumulative probability above the threshold
|
406 |
+
sorted_indices_to_remove = cumulative_probs > top_p
|
407 |
+
# Shift the indices to the right to keep also the first token
|
408 |
+
# above the threshold
|
409 |
+
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
|
410 |
+
sorted_indices_to_remove[..., 0] = 0
|
411 |
+
for i in range(sorted_indices.size(0)):
|
412 |
+
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
|
413 |
+
logits[i][indices_to_remove] = filter_value
|
414 |
+
|
415 |
+
return logits
|
416 |
+
|
417 |
+
|
418 |
+
def switch(val1, val2, boolean):
|
419 |
+
boolean = boolean.type_as(val1)
|
420 |
+
return (1 - boolean) * val1 + boolean * val2
|
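For context, these helpers are meant to be used together at generation time: the context-building function above (named `make_context` in Qwen's released utils; the name is an assumption here since its signature sits earlier in this file) builds the ChatML prompt, `StopWordsLogitsProcessor` forces EOS once a stop sequence appears, and `decode_tokens` strips the prompt and stop words from the output. The sketch below is illustrative only; `model` and `tokenizer` are placeholders for a loaded Qwen-VL-Chat checkpoint and its tokenizer.

```python
import torch
from transformers import LogitsProcessorList

# Illustrative wiring of the utilities above; `model` and `tokenizer` are assumed
# to be a loaded Qwen-VL-Chat model and its QWenTokenizer.
raw_text, context_tokens = make_context(
    tokenizer,
    "Describe the picture.",
    history=[],
    system="You are a helpful assistant.",
    chat_format="chatml",
)
stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
logits_processor = LogitsProcessorList(
    [StopWordsLogitsProcessor(stop_words_ids=stop_words_ids, eos_token_id=tokenizer.eod_id)]
)
input_ids = torch.tensor([context_tokens]).to(model.device)
outputs = model.generate(input_ids, logits_processor=logits_processor, max_new_tokens=256)
response = decode_tokens(
    outputs[0],
    tokenizer,
    raw_text_len=len(raw_text),
    context_length=len(context_tokens),
    chat_format="chatml",
    errors="replace",
)
```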
Qwen-VL-Chat/tokenization_qwen.py
ADDED
@@ -0,0 +1,598 @@
1 |
+
# Copyright (c) Alibaba Cloud.
|
2 |
+
#
|
3 |
+
# This source code is licensed under the license found in the
|
4 |
+
# LICENSE file in the root directory of this source tree.
|
5 |
+
|
6 |
+
"""Tokenization classes for QWen."""
|
7 |
+
|
8 |
+
import base64
|
9 |
+
import logging
|
10 |
+
import os
|
11 |
+
import requests
|
12 |
+
import unicodedata
|
13 |
+
from typing import Collection, Dict, List, Set, Tuple, Union, Any, Callable, Optional
|
14 |
+
|
15 |
+
import tiktoken
|
16 |
+
import numpy as np
|
17 |
+
from PIL import Image
|
18 |
+
from PIL import ImageFont
|
19 |
+
from PIL import ImageDraw
|
20 |
+
from transformers import PreTrainedTokenizer, AddedToken
|
21 |
+
from transformers.utils import try_to_load_from_cache
|
22 |
+
|
23 |
+
import matplotlib.colors as mcolors
|
24 |
+
from matplotlib.font_manager import FontProperties
|
25 |
+
|
26 |
+
logger = logging.getLogger(__name__)
|
27 |
+
|
28 |
+
|
29 |
+
VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken", "ttf": "SimSun.ttf"}
|
30 |
+
FONT_PATH = try_to_load_from_cache("Qwen/Qwen-VL-Chat", "SimSun.ttf")
|
31 |
+
if FONT_PATH is None:
|
32 |
+
if not os.path.exists("SimSun.ttf"):
|
33 |
+
ttf = requests.get("https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/SimSun.ttf")
|
34 |
+
open("SimSun.ttf", "wb").write(ttf.content)
|
35 |
+
FONT_PATH = "SimSun.ttf"
|
36 |
+
|
37 |
+
PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
|
38 |
+
ENDOFTEXT = "<|endoftext|>"
|
39 |
+
IMSTART = "<|im_start|>"
|
40 |
+
IMEND = "<|im_end|>"
|
41 |
+
# as the default behavior is changed to allow special tokens in
|
42 |
+
# regular texts, the surface forms of special tokens need to be
|
43 |
+
# as different as possible to minimize the impact
|
44 |
+
EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
|
45 |
+
SPECIAL_TOKENS = (
|
46 |
+
ENDOFTEXT,
|
47 |
+
IMSTART,
|
48 |
+
IMEND,
|
49 |
+
) + EXTRAS
|
50 |
+
IMG_TOKEN_SPAN = 256
|
51 |
+
|
52 |
+
|
53 |
+
def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
|
54 |
+
with open(tiktoken_bpe_file, "rb") as f:
|
55 |
+
contents = f.read()
|
56 |
+
return {
|
57 |
+
base64.b64decode(token): int(rank)
|
58 |
+
for token, rank in (line.split() for line in contents.splitlines() if line)
|
59 |
+
}
|
60 |
+
|
61 |
+
def _list_find(
|
62 |
+
input_list: List[Any],
|
63 |
+
candidates: Tuple[Any],
|
64 |
+
start: int = 0,
|
65 |
+
):
|
66 |
+
for i in range(start, len(input_list)):
|
67 |
+
if input_list[i] in candidates:
|
68 |
+
return i
|
69 |
+
return -1
|
70 |
+
|
71 |
+
def _replace_closed_tag(
|
72 |
+
input_tokens: List[Any],
|
73 |
+
start_tags: Union[Any, Tuple[Any]],
|
74 |
+
end_tags: Union[Any, Tuple[Any]],
|
75 |
+
inclusive_replace_func: Callable,
|
76 |
+
exclusive_replace_func: Callable = lambda x: x,
|
77 |
+
):
|
78 |
+
if isinstance(start_tags, (str, int)):
|
79 |
+
start_tags = (start_tags,)
|
80 |
+
if isinstance(end_tags, (str, int)):
|
81 |
+
end_tags = (end_tags,)
|
82 |
+
assert len(start_tags) == len(end_tags)
|
83 |
+
|
84 |
+
output_tokens = []
|
85 |
+
end = 0
|
86 |
+
while True:
|
87 |
+
start = _list_find(input_tokens, start_tags, end)
|
88 |
+
if start == -1:
|
89 |
+
break
|
90 |
+
output_tokens.extend(exclusive_replace_func(input_tokens[end : start]))
|
91 |
+
tag_idx = start_tags.index(input_tokens[start])
|
92 |
+
end = _list_find(input_tokens, (end_tags[tag_idx],), start)
|
93 |
+
if end == -1:
|
94 |
+
raise ValueError("Unclosed image token")
|
95 |
+
output_tokens.extend(inclusive_replace_func(input_tokens[start : end + 1]))
|
96 |
+
end += 1
|
97 |
+
output_tokens.extend(exclusive_replace_func(input_tokens[end : ]))
|
98 |
+
return output_tokens
|
99 |
+
|
100 |
+
class QWenTokenizer(PreTrainedTokenizer):
|
101 |
+
"""QWen tokenizer."""
|
102 |
+
|
103 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
104 |
+
|
105 |
+
def __init__(
|
106 |
+
self,
|
107 |
+
vocab_file,
|
108 |
+
errors="replace",
|
109 |
+
image_start_tag='<img>',
|
110 |
+
image_end_tag='</img>',
|
111 |
+
image_pad_tag='<imgpad>',
|
112 |
+
ref_start_tag='<ref>',
|
113 |
+
ref_end_tag='</ref>',
|
114 |
+
box_start_tag='<box>',
|
115 |
+
box_end_tag='</box>',
|
116 |
+
quad_start_tag='<quad>',
|
117 |
+
quad_end_tag='</quad>',
|
118 |
+
**kwargs,
|
119 |
+
):
|
120 |
+
super().__init__(**kwargs)
|
121 |
+
self.image_start_tag = image_start_tag
|
122 |
+
self.image_end_tag = image_end_tag
|
123 |
+
self.image_pad_tag = image_pad_tag
|
124 |
+
self.ref_start_tag = ref_start_tag
|
125 |
+
self.ref_end_tag = ref_end_tag
|
126 |
+
self.box_start_tag = box_start_tag
|
127 |
+
self.box_end_tag = box_end_tag
|
128 |
+
self.quad_start_tag = quad_start_tag
|
129 |
+
self.quad_end_tag = quad_end_tag
|
130 |
+
self.IMAGE_ST = (
|
131 |
+
ref_start_tag, ref_end_tag,
|
132 |
+
box_start_tag, box_end_tag,
|
133 |
+
quad_start_tag, quad_end_tag,
|
134 |
+
image_start_tag, image_end_tag,
|
135 |
+
image_pad_tag
|
136 |
+
)
|
137 |
+
|
138 |
+
self.errors = errors # how to handle errors in decoding
|
139 |
+
|
140 |
+
self.mergeable_ranks = _load_tiktoken_bpe(vocab_file) # type: dict[bytes, int]
|
141 |
+
self.special_tokens = {
|
142 |
+
token: index
|
143 |
+
for index, token in enumerate(
|
144 |
+
SPECIAL_TOKENS + self.IMAGE_ST, start=len(self.mergeable_ranks)
|
145 |
+
)
|
146 |
+
}
|
147 |
+
self.img_start_id = self.special_tokens[self.image_start_tag]
|
148 |
+
self.img_end_id = self.special_tokens[self.image_end_tag]
|
149 |
+
self.img_pad_id = self.special_tokens[self.image_pad_tag]
|
150 |
+
self.ref_start_id = self.special_tokens[self.ref_start_tag]
|
151 |
+
self.ref_end_id = self.special_tokens[self.ref_end_tag]
|
152 |
+
self.box_start_id = self.special_tokens[self.box_start_tag]
|
153 |
+
self.box_end_id = self.special_tokens[self.box_end_tag]
|
154 |
+
self.quad_start_id = self.special_tokens[self.quad_start_tag]
|
155 |
+
self.quad_end_id = self.special_tokens[self.quad_end_tag]
|
156 |
+
self.image_special_tokens = set([
|
157 |
+
self.ref_start_id, self.ref_end_id, self.box_start_id, self.box_end_id,
|
158 |
+
self.quad_start_id, self.quad_end_id,
|
159 |
+
])
|
160 |
+
|
161 |
+
enc = tiktoken.Encoding(
|
162 |
+
"Qwen",
|
163 |
+
pat_str=PAT_STR,
|
164 |
+
mergeable_ranks=self.mergeable_ranks,
|
165 |
+
special_tokens=self.special_tokens,
|
166 |
+
)
|
167 |
+
assert (
|
168 |
+
len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
|
169 |
+
), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"
|
170 |
+
|
171 |
+
self.decoder = {
|
172 |
+
v: k for k, v in self.mergeable_ranks.items()
|
173 |
+
} # type: dict[int, bytes|str]
|
174 |
+
self.decoder.update({v: k for k, v in self.special_tokens.items()})
|
175 |
+
|
176 |
+
self.tokenizer = enc # type: tiktoken.Encoding
|
177 |
+
|
178 |
+
self.eod_id = self.tokenizer.eot_token
|
179 |
+
self.im_start_id = self.special_tokens[IMSTART]
|
180 |
+
self.im_end_id = self.special_tokens[IMEND]
|
181 |
+
|
182 |
+
def __getstate__(self):
|
183 |
+
# for pickle lovers
|
184 |
+
state = self.__dict__.copy()
|
185 |
+
del state['tokenizer']
|
186 |
+
return state
|
187 |
+
|
188 |
+
def __setstate__(self, state):
|
189 |
+
# tokenizer is not python native; don't pass it; rebuild it
|
190 |
+
self.__dict__.update(state)
|
191 |
+
enc = tiktoken.Encoding(
|
192 |
+
"Qwen",
|
193 |
+
pat_str=PAT_STR,
|
194 |
+
mergeable_ranks=self.mergeable_ranks,
|
195 |
+
special_tokens=self.special_tokens,
|
196 |
+
)
|
197 |
+
self.tokenizer = enc
|
198 |
+
|
199 |
+
|
200 |
+
def __len__(self) -> int:
|
201 |
+
return self.tokenizer.n_vocab
|
202 |
+
|
203 |
+
def get_vocab(self) -> Dict[bytes, int]:
|
204 |
+
return self.mergeable_ranks
|
205 |
+
|
206 |
+
def convert_tokens_to_ids(
|
207 |
+
self, tokens: Union[bytes, str, List[Union[bytes, str]]]
|
208 |
+
) -> List[int]:
|
209 |
+
ids = []
|
210 |
+
if isinstance(tokens, (str, bytes)):
|
211 |
+
if tokens in self.special_tokens:
|
212 |
+
return self.special_tokens[tokens]
|
213 |
+
else:
|
214 |
+
return self.mergeable_ranks.get(tokens)
|
215 |
+
for token in tokens:
|
216 |
+
if token in self.special_tokens:
|
217 |
+
ids.append(self.special_tokens[token])
|
218 |
+
else:
|
219 |
+
ids.append(self.mergeable_ranks.get(token))
|
220 |
+
return ids
|
221 |
+
|
222 |
+
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
|
223 |
+
if not special_tokens and new_tokens:
|
224 |
+
raise ValueError('Adding regular tokens is not supported')
|
225 |
+
for token in new_tokens:
|
226 |
+
surface_form = token.content if isinstance(token, AddedToken) else token
|
227 |
+
if surface_form not in SPECIAL_TOKENS + self.IMAGE_ST:
|
228 |
+
raise ValueError('Adding unknown special tokens is not supported')
|
229 |
+
return 0
|
230 |
+
|
231 |
+
def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
|
232 |
+
"""
|
233 |
+
Save only the vocabulary of the tokenizer (vocabulary).
|
234 |
+
|
235 |
+
Returns:
|
236 |
+
`Tuple(str)`: Paths to the files saved.
|
237 |
+
"""
|
238 |
+
file_path = os.path.join(save_directory, "qwen.tiktoken")
|
239 |
+
with open(file_path, "w", encoding="utf8") as w:
|
240 |
+
for k, v in self.mergeable_ranks.items():
|
241 |
+
line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
|
242 |
+
w.write(line)
|
243 |
+
return (file_path,)
|
244 |
+
|
245 |
+
def tokenize(
|
246 |
+
self,
|
247 |
+
text: str,
|
248 |
+
allowed_special: Union[Set, str] = "all",
|
249 |
+
disallowed_special: Union[Collection, str] = (),
|
250 |
+
**kwargs,
|
251 |
+
) -> List[Union[bytes, str]]:
|
252 |
+
"""
|
253 |
+
Converts a string into a sequence of tokens.
|
254 |
+
|
255 |
+
Args:
|
256 |
+
text (`str`):
|
257 |
+
The sequence to be encoded.
|
258 |
+
allowed_special (`Literal["all"]` or `set`):
|
259 |
+
The surface forms of the tokens to be encoded as special tokens in regular texts.
|
260 |
+
Default to "all".
|
261 |
+
disallowed_special (`Literal["all"]` or `Collection`):
|
262 |
+
The surface forms of the tokens that should not be in regular texts and trigger errors.
|
263 |
+
Default to an empty tuple.
|
264 |
+
|
265 |
+
kwargs (additional keyword arguments, *optional*):
|
266 |
+
Will be passed to the underlying model specific encode method.
|
267 |
+
|
268 |
+
Returns:
|
269 |
+
`List[bytes|str]`: The list of tokens.
|
270 |
+
"""
|
271 |
+
tokens = []
|
272 |
+
text = unicodedata.normalize("NFC", text)
|
273 |
+
|
274 |
+
# this implementation takes a detour: text -> token id -> token surface forms
|
275 |
+
for t in self.tokenizer.encode(
|
276 |
+
text, allowed_special=allowed_special, disallowed_special=disallowed_special
|
277 |
+
):
|
278 |
+
tokens.append(self.decoder[t])
|
279 |
+
|
280 |
+
def _encode_imgurl(img_tokens):
|
281 |
+
assert img_tokens[0] == self.image_start_tag and img_tokens[-1] == self.image_end_tag
|
282 |
+
img_tokens = img_tokens[1:-1]
|
283 |
+
img_url = b''.join(img_tokens)
|
284 |
+
out_img_tokens = list(map(self.decoder.get, img_url))
|
285 |
+
if len(out_img_tokens) > IMG_TOKEN_SPAN:
|
286 |
+
raise ValueError("The content in {}..{} is too long".format(
|
287 |
+
self.image_start_tag, self.image_end_tag))
|
288 |
+
out_img_tokens.extend([self.image_pad_tag] * (IMG_TOKEN_SPAN - len(out_img_tokens)))
|
289 |
+
out_img_tokens = [self.image_start_tag] + out_img_tokens + [self.image_end_tag]
|
290 |
+
return out_img_tokens
|
291 |
+
|
292 |
+
return _replace_closed_tag(tokens, self.image_start_tag, self.image_end_tag, _encode_imgurl)
|
293 |
+
|
294 |
+
def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
|
295 |
+
"""
|
296 |
+
Converts a sequence of tokens into a single string.
|
297 |
+
"""
|
298 |
+
text = ""
|
299 |
+
temp = b""
|
300 |
+
for t in tokens:
|
301 |
+
if isinstance(t, str):
|
302 |
+
if temp:
|
303 |
+
text += temp.decode("utf-8", errors=self.errors)
|
304 |
+
temp = b""
|
305 |
+
text += t
|
306 |
+
elif isinstance(t, bytes):
|
307 |
+
temp += t
|
308 |
+
else:
|
309 |
+
raise TypeError("token should only be of type types or str")
|
310 |
+
if temp:
|
311 |
+
text += temp.decode("utf-8", errors=self.errors)
|
312 |
+
return text
|
313 |
+
|
314 |
+
@property
|
315 |
+
def vocab_size(self):
|
316 |
+
return self.tokenizer.n_vocab
|
317 |
+
|
318 |
+
def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
|
319 |
+
"""Converts an id to a token, special tokens included"""
|
320 |
+
if index in self.decoder:
|
321 |
+
return self.decoder[index]
|
322 |
+
raise ValueError("unknown ids")
|
323 |
+
|
324 |
+
def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
|
325 |
+
"""Converts a token to an id using the vocab, special tokens included"""
|
326 |
+
if token in self.special_tokens:
|
327 |
+
return self.special_tokens[token]
|
328 |
+
if token in self.mergeable_ranks:
|
329 |
+
return self.mergeable_ranks[token]
|
330 |
+
raise ValueError("unknown token")
|
331 |
+
|
332 |
+
def _tokenize(self, text: str, **kwargs):
|
333 |
+
"""
|
334 |
+
Converts a string into a sequence of tokens (string), using the tokenizer. Splits into words for word-based
|
335 |
+
vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
|
336 |
+
|
337 |
+
Do NOT take care of added tokens.
|
338 |
+
"""
|
339 |
+
raise NotImplementedError
|
340 |
+
|
341 |
+
def _decode(
|
342 |
+
self,
|
343 |
+
token_ids: Union[int, List[int]],
|
344 |
+
skip_special_tokens: bool = False,
|
345 |
+
errors: str = None,
|
346 |
+
**kwargs,
|
347 |
+
) -> str:
|
348 |
+
if isinstance(token_ids, int):
|
349 |
+
token_ids = [token_ids]
|
350 |
+
|
351 |
+
def _decode_imgurl(img_token_ids):
|
352 |
+
assert img_token_ids[0] == self.img_start_id and img_token_ids[-1] == self.img_end_id
|
353 |
+
img_token_ids = img_token_ids[1:-1]
|
354 |
+
img_token_ids = img_token_ids[ : img_token_ids.index(self.img_pad_id)]
|
355 |
+
img_url = bytes(img_token_ids).decode('utf-8')
|
356 |
+
return [self.img_start_id] + self.tokenizer.encode(img_url) + [self.img_end_id]
|
357 |
+
|
358 |
+
token_ids = _replace_closed_tag(token_ids, self.img_start_id, self.img_end_id, _decode_imgurl)
|
359 |
+
|
360 |
+
if skip_special_tokens:
|
361 |
+
if kwargs.get('keep_image_special', False):
|
362 |
+
token_ids = [i for i in token_ids if i < self.eod_id
|
363 |
+
or i in self.image_special_tokens]
|
364 |
+
else:
|
365 |
+
token_ids = [i for i in token_ids if i < self.eod_id]
|
366 |
+
return self.tokenizer.decode(token_ids, errors=errors or self.errors)
|
367 |
+
|
368 |
+
def to_list_format(self, text: str):
|
369 |
+
text = unicodedata.normalize("NFC", text)
|
370 |
+
token_ids = self.tokenizer.encode(
|
371 |
+
text, allowed_special=set(self.IMAGE_ST + (ENDOFTEXT,)))
|
372 |
+
|
373 |
+
def _encode_vl_info(tokens):
|
374 |
+
if len(tokens) == 0:
|
375 |
+
return []
|
376 |
+
if tokens[0] == self.img_start_id and tokens[-1] == self.img_end_id:
|
377 |
+
key = 'image'
|
378 |
+
elif tokens[0] == self.ref_start_id and tokens[-1] == self.ref_end_id:
|
379 |
+
key = 'ref'
|
380 |
+
elif tokens[0] == self.box_start_id and tokens[-1] == self.box_end_id:
|
381 |
+
key = 'box'
|
382 |
+
elif tokens[0] == self.quad_start_id and tokens[-1] == self.quad_end_id:
|
383 |
+
key = 'quad'
|
384 |
+
else:
|
385 |
+
_tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x
|
386 |
+
return [{'text': b''.join(map(_tobytes, map(self.decoder.get, tokens))).decode('utf-8')}]
|
387 |
+
_tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x
|
388 |
+
val = b''.join(map(_tobytes, map(self.decoder.get, tokens[1:-1]))).decode('utf-8')
|
389 |
+
return [{key: val}]
|
390 |
+
|
391 |
+
return _replace_closed_tag(
|
392 |
+
token_ids,
|
393 |
+
(self.img_start_id, self.ref_start_id, self.box_start_id, self.quad_start_id),
|
394 |
+
(self.img_end_id, self.ref_end_id, self.box_end_id, self.quad_end_id),
|
395 |
+
_encode_vl_info,
|
396 |
+
_encode_vl_info,
|
397 |
+
)
|
398 |
+
|
399 |
+
def from_list_format(self, list_format: List[Dict]):
|
400 |
+
text = ''
|
401 |
+
num_images = 0
|
402 |
+
for ele in list_format:
|
403 |
+
if 'image' in ele:
|
404 |
+
num_images += 1
|
405 |
+
text += f'Picture {num_images}: '
|
406 |
+
text += self.image_start_tag + ele['image'] + self.image_end_tag
|
407 |
+
text += '\n'
|
408 |
+
elif 'text' in ele:
|
409 |
+
text += ele['text']
|
410 |
+
elif 'box' in ele:
|
411 |
+
if 'ref' in ele:
|
412 |
+
text += self.ref_start_tag + ele['ref'] + self.ref_end_tag
|
413 |
+
for box in ele['box']:
|
414 |
+
text += self.box_start_tag + '(%d,%d),(%d,%d)' % (box[0], box[1], box[2], box[3]) + self.box_end_tag
|
415 |
+
else:
|
416 |
+
raise ValueError("Unsupport element: " + str(ele))
|
417 |
+
return text
|
418 |
+
|
419 |
+
def _fetch_latest_picture(self, response, history):
|
420 |
+
if history is None:
|
421 |
+
history = []
|
422 |
+
_history = history + [(response, None)]
|
423 |
+
for q, r in _history[::-1]:
|
424 |
+
for ele in self.to_list_format(q)[::-1]:
|
425 |
+
if 'image' in ele:
|
426 |
+
return ele['image']
|
427 |
+
return None
|
428 |
+
|
429 |
+
def _fetch_all_box_with_ref(self, text):
|
430 |
+
list_format = self.to_list_format(text)
|
431 |
+
output = []
|
432 |
+
for i, ele in enumerate(list_format):
|
433 |
+
if 'box' in ele:
|
434 |
+
bbox = tuple(map(int, ele['box'].replace('(', '').replace(')', '').split(',')))
|
435 |
+
assert len(bbox) == 4
|
436 |
+
output.append({'box': bbox})
|
437 |
+
if i > 0 and 'ref' in list_format[i-1]:
|
438 |
+
output[-1]['ref'] = list_format[i-1]['ref'].strip()
|
439 |
+
return output
|
440 |
+
|
441 |
+
def draw_bbox_on_latest_picture(
|
442 |
+
self,
|
443 |
+
response,
|
444 |
+
history=None,
|
445 |
+
) -> Optional[Image.Image]:
|
446 |
+
image = self._fetch_latest_picture(response, history)
|
447 |
+
if image is None:
|
448 |
+
return None
|
449 |
+
if image.startswith("http://") or image.startswith("https://"):
|
450 |
+
image = Image.open(requests.get(image, stream=True).raw).convert("RGB")
|
451 |
+
h, w = image.height, image.width
|
452 |
+
else:
|
453 |
+
image = np.asarray(Image.open(image).convert("RGB"))
|
454 |
+
h, w = image.shape[0], image.shape[1]
|
455 |
+
visualizer = Visualizer(image)
|
456 |
+
|
457 |
+
boxes = self._fetch_all_box_with_ref(response)
|
458 |
+
if not boxes:
|
459 |
+
return None
|
460 |
+
color = random.choice([_ for _ in mcolors.TABLEAU_COLORS.keys()]) # init color
|
461 |
+
for box in boxes:
|
462 |
+
if 'ref' in box: # random new color for new refexps
|
463 |
+
color = random.choice([_ for _ in mcolors.TABLEAU_COLORS.keys()])
|
464 |
+
x1, y1, x2, y2 = box['box']
|
465 |
+
x1, y1, x2, y2 = (int(x1 / 1000 * w), int(y1 / 1000 * h), int(x2 / 1000 * w), int(y2 / 1000 * h))
|
466 |
+
visualizer.draw_box((x1, y1, x2, y2), alpha=1, edge_color=color)
|
467 |
+
if 'ref' in box:
|
468 |
+
visualizer.draw_text(box['ref'], (x1, y1), color=color, horizontal_alignment="left")
|
469 |
+
return visualizer.output
|
470 |
+
|
471 |
+
|
472 |
+
import colorsys
|
473 |
+
import logging
|
474 |
+
import math
|
475 |
+
import numpy as np
|
476 |
+
import matplotlib as mpl
|
477 |
+
import matplotlib.colors as mplc
|
478 |
+
import matplotlib.figure as mplfigure
|
479 |
+
import torch
|
480 |
+
from matplotlib.backends.backend_agg import FigureCanvasAgg
|
481 |
+
from PIL import Image
|
482 |
+
import random
|
483 |
+
|
484 |
+
logger = logging.getLogger(__name__)
|
485 |
+
|
486 |
+
|
487 |
+
class VisImage:
|
488 |
+
def __init__(self, img, scale=1.0):
|
489 |
+
self.img = img
|
490 |
+
self.scale = scale
|
491 |
+
self.width, self.height = img.shape[1], img.shape[0]
|
492 |
+
self._setup_figure(img)
|
493 |
+
|
494 |
+
def _setup_figure(self, img):
|
495 |
+
fig = mplfigure.Figure(frameon=False)
|
496 |
+
self.dpi = fig.get_dpi()
|
497 |
+
# add a small 1e-2 to avoid precision lost due to matplotlib's truncation
|
498 |
+
# (https://github.com/matplotlib/matplotlib/issues/15363)
|
499 |
+
fig.set_size_inches(
|
500 |
+
(self.width * self.scale + 1e-2) / self.dpi,
|
501 |
+
(self.height * self.scale + 1e-2) / self.dpi,
|
502 |
+
)
|
503 |
+
self.canvas = FigureCanvasAgg(fig)
|
504 |
+
# self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
|
505 |
+
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
|
506 |
+
ax.axis("off")
|
507 |
+
self.fig = fig
|
508 |
+
self.ax = ax
|
509 |
+
self.reset_image(img)
|
510 |
+
|
511 |
+
def reset_image(self, img):
|
512 |
+
img = img.astype("uint8")
|
513 |
+
self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
|
514 |
+
|
515 |
+
def save(self, filepath):
|
516 |
+
self.fig.savefig(filepath)
|
517 |
+
|
518 |
+
def get_image(self):
|
519 |
+
canvas = self.canvas
|
520 |
+
s, (width, height) = canvas.print_to_buffer()
|
521 |
+
|
522 |
+
buffer = np.frombuffer(s, dtype="uint8")
|
523 |
+
|
524 |
+
img_rgba = buffer.reshape(height, width, 4)
|
525 |
+
rgb, alpha = np.split(img_rgba, [3], axis=2)
|
526 |
+
return rgb.astype("uint8")
|
527 |
+
|
528 |
+
|
529 |
+
class Visualizer:
|
530 |
+
def __init__(self, img_rgb, metadata=None, scale=1.0):
|
531 |
+
self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
|
532 |
+
self.font_path = FONT_PATH
|
533 |
+
self.output = VisImage(self.img, scale=scale)
|
534 |
+
self.cpu_device = torch.device("cpu")
|
535 |
+
|
536 |
+
# texts that are too small are useless, therefore clamp the font size to a minimum
|
537 |
+
self._default_font_size = max(
|
538 |
+
np.sqrt(self.output.height * self.output.width) // 30, 15 // scale
|
539 |
+
)
|
540 |
+
|
541 |
+
def draw_text(
|
542 |
+
self,
|
543 |
+
text,
|
544 |
+
position,
|
545 |
+
*,
|
546 |
+
font_size=None,
|
547 |
+
color="g",
|
548 |
+
horizontal_alignment="center",
|
549 |
+
rotation=0,
|
550 |
+
):
|
551 |
+
if not font_size:
|
552 |
+
font_size = self._default_font_size
|
553 |
+
|
554 |
+
# since the text background is dark, we don't want the text to be dark
|
555 |
+
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
|
556 |
+
color[np.argmax(color)] = max(0.8, np.max(color))
|
557 |
+
|
558 |
+
x, y = position
|
559 |
+
self.output.ax.text(
|
560 |
+
x,
|
561 |
+
y,
|
562 |
+
text,
|
563 |
+
size=font_size * self.output.scale,
|
564 |
+
fontproperties=FontProperties(fname=self.font_path),
|
565 |
+
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
|
566 |
+
verticalalignment="top",
|
567 |
+
horizontalalignment=horizontal_alignment,
|
568 |
+
color=color,
|
569 |
+
zorder=10,
|
570 |
+
rotation=rotation,
|
571 |
+
)
|
572 |
+
return self.output
|
573 |
+
|
574 |
+
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
|
575 |
+
|
576 |
+
x0, y0, x1, y1 = box_coord
|
577 |
+
width = x1 - x0
|
578 |
+
height = y1 - y0
|
579 |
+
|
580 |
+
linewidth = max(self._default_font_size / 4, 1)
|
581 |
+
|
582 |
+
self.output.ax.add_patch(
|
583 |
+
mpl.patches.Rectangle(
|
584 |
+
(x0, y0),
|
585 |
+
width,
|
586 |
+
height,
|
587 |
+
fill=False,
|
588 |
+
edgecolor=edge_color,
|
589 |
+
linewidth=linewidth * self.output.scale,
|
590 |
+
alpha=alpha,
|
591 |
+
linestyle=line_style,
|
592 |
+
)
|
593 |
+
)
|
594 |
+
return self.output
|
595 |
+
|
596 |
+
def get_output(self):
|
597 |
+
|
598 |
+
return self.output
|
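To illustrate how the tag-based helpers defined in this tokenizer fit together, here is a minimal sketch; the image URL and the hand-written `response` string are placeholders, and the box coordinates follow the 0–1000 normalized format expected by `draw_bbox_on_latest_picture`:

```python
# `tokenizer` is assumed to be the QWenTokenizer defined above.
query = tokenizer.from_list_format([
    {"image": "https://example.com/demo.jpeg"},   # placeholder URL
    {"text": "Find the dog in the picture."},
])
# A hand-written stand-in for a model response containing <ref>/<box> tags.
response = "<ref>the dog</ref><box>(120,200),(480,700)</box>"

parsed = tokenizer.to_list_format(response)
# -> [{'ref': 'the dog'}, {'box': '(120,200),(480,700)'}]
image = tokenizer.draw_bbox_on_latest_picture(response, history=[(query, response)])
if image is not None:
    image.save("output_with_boxes.jpg")
```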
Qwen-VL-Chat/tokenizer_config.json
ADDED
@@ -0,0 +1,10 @@
{
  "model_max_length": 8192,
  "tokenizer_class": "QWenTokenizer",
  "auto_map": {
    "AutoTokenizer": [
      "tokenization_qwen.QWenTokenizer",
      null
    ]
  }
}
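The `auto_map` entry above is what lets `AutoTokenizer` resolve the custom class in `tokenization_qwen.py`; a minimal sketch of loading it (remote code must be trusted):

```python
from transformers import AutoTokenizer

# trust_remote_code=True is needed so that tokenization_qwen.QWenTokenizer,
# declared in auto_map above, is actually used.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)
print(len(tokenizer))  # vocab size reported by QWenTokenizer.__len__
```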
Qwen-VL-Chat/visual.py
ADDED
@@ -0,0 +1,426 @@
1 |
+
# Copyright (c) Alibaba Cloud.
|
2 |
+
#
|
3 |
+
# This source code is licensed under the license found in the
|
4 |
+
# LICENSE file in the root directory of this source tree.
|
5 |
+
|
6 |
+
from collections import OrderedDict
|
7 |
+
import math
|
8 |
+
import requests
|
9 |
+
from io import BytesIO
|
10 |
+
from functools import partial
|
11 |
+
from PIL import Image
|
12 |
+
from typing import Callable, Optional, Sequence, Tuple, List
|
13 |
+
import numpy as np
|
14 |
+
|
15 |
+
import torch
|
16 |
+
from torch import nn
|
17 |
+
from torch.nn import functional as F
|
18 |
+
from torch.nn.init import trunc_normal_
|
19 |
+
from torchvision import transforms
|
20 |
+
from torchvision.transforms import InterpolationMode
|
21 |
+
|
22 |
+
|
23 |
+
def get_abs_pos(abs_pos, tgt_size):
|
24 |
+
# abs_pos: L, C
|
25 |
+
# tgt_size: M
|
26 |
+
# return: M, C
|
27 |
+
src_size = int(math.sqrt(abs_pos.size(0)))
|
28 |
+
tgt_size = int(math.sqrt(tgt_size))
|
29 |
+
dtype = abs_pos.dtype
|
30 |
+
|
31 |
+
if src_size != tgt_size:
|
32 |
+
return F.interpolate(
|
33 |
+
abs_pos.float().reshape(1, src_size, src_size, -1).permute(0, 3, 1, 2),
|
34 |
+
size=(tgt_size, tgt_size),
|
35 |
+
mode="bicubic",
|
36 |
+
align_corners=False,
|
37 |
+
).permute(0, 2, 3, 1).flatten(0, 2).to(dtype=dtype)
|
38 |
+
else:
|
39 |
+
return abs_pos
|
40 |
+
|
41 |
+
# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
|
42 |
+
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
|
43 |
+
"""
|
44 |
+
grid_size: int of the grid height and width
|
45 |
+
return:
|
46 |
+
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
|
47 |
+
"""
|
48 |
+
grid_h = np.arange(grid_size, dtype=np.float32)
|
49 |
+
grid_w = np.arange(grid_size, dtype=np.float32)
|
50 |
+
grid = np.meshgrid(grid_w, grid_h) # here w goes first
|
51 |
+
grid = np.stack(grid, axis=0)
|
52 |
+
|
53 |
+
grid = grid.reshape([2, 1, grid_size, grid_size])
|
54 |
+
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
|
55 |
+
if cls_token:
|
56 |
+
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
|
57 |
+
return pos_embed
|
58 |
+
|
59 |
+
|
60 |
+
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
|
61 |
+
assert embed_dim % 2 == 0
|
62 |
+
|
63 |
+
# use half of dimensions to encode grid_h
|
64 |
+
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
|
65 |
+
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
|
66 |
+
|
67 |
+
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
|
68 |
+
return emb
|
69 |
+
|
70 |
+
|
71 |
+
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
|
72 |
+
"""
|
73 |
+
embed_dim: output dimension for each position
|
74 |
+
pos: a list of positions to be encoded: size (M,)
|
75 |
+
out: (M, D)
|
76 |
+
"""
|
77 |
+
assert embed_dim % 2 == 0
|
78 |
+
omega = np.arange(embed_dim // 2, dtype=np.float32)
|
79 |
+
omega /= embed_dim / 2.
|
80 |
+
omega = 1. / 10000**omega # (D/2,)
|
81 |
+
|
82 |
+
pos = pos.reshape(-1) # (M,)
|
83 |
+
out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
|
84 |
+
|
85 |
+
emb_sin = np.sin(out) # (M, D/2)
|
86 |
+
emb_cos = np.cos(out) # (M, D/2)
|
87 |
+
|
88 |
+
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
|
89 |
+
return emb
|
90 |
+
|
91 |
+
|
92 |
+
class Resampler(nn.Module):
|
93 |
+
"""
|
94 |
+
A 2D perceiver-resampler network with one cross attention layers by
|
95 |
+
(grid_size**2) learnable queries and 2d sincos pos_emb
|
96 |
+
Outputs:
|
97 |
+
A tensor with the shape of (grid_size**2, embed_dim)
|
98 |
+
"""
|
99 |
+
def __init__(
|
100 |
+
self,
|
101 |
+
grid_size,
|
102 |
+
embed_dim,
|
103 |
+
num_heads,
|
104 |
+
kv_dim=None,
|
105 |
+
norm_layer=nn.LayerNorm
|
106 |
+
):
|
107 |
+
super().__init__()
|
108 |
+
self.num_queries = grid_size ** 2
|
109 |
+
self.embed_dim = embed_dim
|
110 |
+
self.num_heads = num_heads
|
111 |
+
|
112 |
+
self.pos_embed = nn.Parameter(
|
113 |
+
torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
|
114 |
+
).requires_grad_(False)
|
115 |
+
|
116 |
+
self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
117 |
+
trunc_normal_(self.query, std=.02)
|
118 |
+
|
119 |
+
if kv_dim is not None and kv_dim != embed_dim:
|
120 |
+
self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
|
121 |
+
else:
|
122 |
+
self.kv_proj = nn.Identity()
|
123 |
+
|
124 |
+
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
|
125 |
+
self.ln_q = norm_layer(embed_dim)
|
126 |
+
self.ln_kv = norm_layer(embed_dim)
|
127 |
+
|
128 |
+
# self.apply(self._init_weights)
|
129 |
+
|
130 |
+
def _init_weights(self, m):
|
131 |
+
if isinstance(m, nn.Linear):
|
132 |
+
trunc_normal_(m.weight, std=.02)
|
133 |
+
if isinstance(m, nn.Linear) and m.bias is not None:
|
134 |
+
nn.init.constant_(m.bias, 0)
|
135 |
+
elif isinstance(m, nn.LayerNorm):
|
136 |
+
nn.init.constant_(m.bias, 0)
|
137 |
+
nn.init.constant_(m.weight, 1.0)
|
138 |
+
|
139 |
+
def forward(self, x, attn_mask=None):
|
140 |
+
|
141 |
+
pos_embed = get_abs_pos(self.pos_embed, x.size(1))
|
142 |
+
|
143 |
+
x = self.kv_proj(x)
|
144 |
+
x = self.ln_kv(x).permute(1, 0, 2)
|
145 |
+
|
146 |
+
N = x.shape[1]
|
147 |
+
q = self.ln_q(self.query)
|
148 |
+
out = self.attn(
|
149 |
+
self._repeat(q, N) + self.pos_embed.unsqueeze(1),
|
150 |
+
x + pos_embed.unsqueeze(1),
|
151 |
+
x,
|
152 |
+
attn_mask=attn_mask)[0]
|
153 |
+
return out.permute(1, 0, 2)
|
154 |
+
|
155 |
+
def _repeat(self, query, N: int):
|
156 |
+
return query.unsqueeze(1).repeat(1, N, 1)
|
157 |
+
|
158 |
+
|
159 |
+
class VisualAttention(nn.Module):
|
160 |
+
"""self-attention layer class.
|
161 |
+
|
162 |
+
Self-attention layer takes input with size [s, b, h]
|
163 |
+
and returns output of the same size.
|
164 |
+
"""
|
165 |
+
|
166 |
+
def __init__(self, embed_dim, num_heads,
|
167 |
+
bias=True, kdim=None, vdim=None):
|
168 |
+
super(VisualAttention, self).__init__()
|
169 |
+
self.embed_dim = embed_dim
|
170 |
+
self.kdim = kdim if kdim is not None else embed_dim
|
171 |
+
self.vdim = vdim if vdim is not None else embed_dim
|
172 |
+
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
|
173 |
+
|
174 |
+
self.num_heads = num_heads
|
175 |
+
|
176 |
+
# Per attention head and per partition values.
|
177 |
+
assert embed_dim % num_heads == 0
|
178 |
+
self.hidden_size_per_attention_head = embed_dim // num_heads
|
179 |
+
self.num_attention_heads_per_partition = num_heads
|
180 |
+
self.hidden_size_per_partition = embed_dim
|
181 |
+
|
182 |
+
# Strided linear layer.
|
183 |
+
assert self._qkv_same_embed_dim, 'Only Support SelfAttention Currently'
|
184 |
+
self.in_proj = nn.Linear(embed_dim, 3 * embed_dim)
|
185 |
+
self.out_proj = nn.Linear(embed_dim, embed_dim)
|
186 |
+
self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
|
187 |
+
|
188 |
+
def forward(self, query, key, value, attn_mask = None):
|
189 |
+
# query/key/value: [sq, b, h]
|
190 |
+
sq, b, _ = query.size()
|
191 |
+
|
192 |
+
assert torch.allclose(query, key), 'Only Support Self-Attention Currently'
|
193 |
+
sk = sq
|
194 |
+
mixed_x_layer = self.in_proj(query)
|
195 |
+
|
196 |
+
# [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
|
197 |
+
new_tensor_shape = mixed_x_layer.size()[:-1] + \
|
198 |
+
(self.num_attention_heads_per_partition,
|
199 |
+
3 * self.hidden_size_per_attention_head)
|
200 |
+
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
|
201 |
+
|
202 |
+
# [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
|
203 |
+
query_layer, key_layer, value_layer = mixed_x_layer.split(
|
204 |
+
self.hidden_size_per_attention_head, dim=-1)
|
205 |
+
|
206 |
+
# [sq, b, np, hn] -> [sq, b * np, hn]
|
207 |
+
query_layer = query_layer.view(sq,
|
208 |
+
b * self.num_attention_heads_per_partition,
|
209 |
+
self.hidden_size_per_attention_head).transpose(0, 1)
|
210 |
+
# [sk, b, np, hn] -> [sk, b * np, hn]
|
211 |
+
key_layer = key_layer.view(sk,
|
212 |
+
b * self.num_attention_heads_per_partition,
|
213 |
+
self.hidden_size_per_attention_head).transpose(0, 1)
|
214 |
+
|
215 |
+
q_scaled = query_layer / self.norm_factor
|
216 |
+
if attn_mask is not None:
|
217 |
+
attention_probs = torch.baddbmm(attn_mask, q_scaled, key_layer.transpose(-2, -1))
|
218 |
+
else:
|
219 |
+
attention_probs = torch.bmm(q_scaled, key_layer.transpose(-2, -1))
|
220 |
+
attention_probs = attention_probs.softmax(dim=-1)
|
221 |
+
|
222 |
+
value_layer = value_layer.view(sk,
|
223 |
+
b * self.num_attention_heads_per_partition,
|
224 |
+
self.hidden_size_per_attention_head).transpose(0, 1)
|
225 |
+
|
226 |
+
# matmul: [b * np, sq, hn]
|
227 |
+
context_layer = torch.bmm(attention_probs, value_layer)
|
228 |
+
|
229 |
+
# change view [b, np, sq, hn]
|
230 |
+
context_layer = context_layer.view(b,
|
231 |
+
self.num_attention_heads_per_partition,
|
232 |
+
sq, self.hidden_size_per_attention_head)
|
233 |
+
|
234 |
+
# [b, np, sq, hn] --> [sq, b, np, hn]
|
235 |
+
context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
|
236 |
+
|
237 |
+
# [sq, b, np, hn] --> [sq, b, hp]
|
238 |
+
new_context_layer_shape = context_layer.size()[:-2] + \
|
239 |
+
(self.hidden_size_per_partition,)
|
240 |
+
context_layer = context_layer.view(*new_context_layer_shape)
|
241 |
+
|
242 |
+
output = self.out_proj(context_layer)
|
243 |
+
|
244 |
+
return output
|
245 |
+
|
246 |
+
|
247 |
+
class VisualAttentionBlock(nn.Module):
|
248 |
+
def __init__(
|
249 |
+
self,
|
250 |
+
d_model: int,
|
251 |
+
n_head: int,
|
252 |
+
mlp_ratio: float = 4.0,
|
253 |
+
act_layer: Callable = nn.GELU,
|
254 |
+
norm_layer: Callable = nn.LayerNorm,
|
255 |
+
is_cross_attention: bool = False,
|
256 |
+
):
|
257 |
+
super().__init__()
|
258 |
+
|
259 |
+
self.ln_1 = norm_layer(d_model)
|
260 |
+
if is_cross_attention:
|
261 |
+
self.ln_1_kv = norm_layer(d_model)
|
262 |
+
|
263 |
+
self.ln_2 = norm_layer(d_model)
|
264 |
+
mlp_width = int(d_model * mlp_ratio)
|
265 |
+
self.attn = VisualAttention(d_model, n_head)
|
266 |
+
self.mlp = nn.Sequential(OrderedDict([
|
267 |
+
("c_fc", nn.Linear(d_model, mlp_width)),
|
268 |
+
("gelu", act_layer()),
|
269 |
+
("c_proj", nn.Linear(mlp_width, d_model))
|
270 |
+
]))
|
271 |
+
|
272 |
+
def attention(
|
273 |
+
self,
|
274 |
+
q_x: torch.Tensor,
|
275 |
+
k_x: Optional[torch.Tensor] = None,
|
276 |
+
v_x: Optional[torch.Tensor] = None,
|
277 |
+
attn_mask: Optional[torch.Tensor] = None,
|
278 |
+
):
|
279 |
+
k_x = k_x if k_x is not None else q_x
|
280 |
+
v_x = v_x if v_x is not None else q_x
|
281 |
+
|
282 |
+
attn_mask = attn_mask.to(q_x.dtype) if attn_mask is not None else None
|
283 |
+
return self.attn(q_x, k_x, v_x, attn_mask=attn_mask)
|
284 |
+
|
285 |
+
def forward(
|
286 |
+
self,
|
287 |
+
q_x: torch.Tensor,
|
288 |
+
k_x: Optional[torch.Tensor] = None,
|
289 |
+
v_x: Optional[torch.Tensor] = None,
|
290 |
+
attn_mask: Optional[torch.Tensor] = None,
|
291 |
+
):
|
292 |
+
k_x = self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
|
293 |
+
v_x = self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None
|
294 |
+
|
295 |
+
x = q_x + self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask)
|
296 |
+
x = x + self.mlp(self.ln_2(x))
|
297 |
+
return x
|
298 |
+
|
299 |
+
|
300 |
+
class TransformerBlock(nn.Module):
|
301 |
+
def __init__(
|
302 |
+
self,
|
303 |
+
width: int,
|
304 |
+
layers: int,
|
305 |
+
heads: int,
|
306 |
+
mlp_ratio: float = 4.0,
|
307 |
+
act_layer: Callable = nn.GELU,
|
308 |
+
norm_layer: Callable = nn.LayerNorm,
|
309 |
+
):
|
310 |
+
super().__init__()
|
311 |
+
self.width = width
|
312 |
+
self.layers = layers
|
313 |
+
|
314 |
+
self.resblocks = nn.ModuleList([
|
315 |
+
VisualAttentionBlock(
|
316 |
+
width, heads, mlp_ratio, act_layer=act_layer, norm_layer=norm_layer)
|
317 |
+
for _ in range(layers)
|
318 |
+
])
|
319 |
+
|
320 |
+
def get_cast_dtype(self) -> torch.dtype:
|
321 |
+
return self.resblocks[0].mlp.c_fc.weight.dtype
|
322 |
+
|
323 |
+
def get_cast_device(self) -> torch.device:
|
324 |
+
return self.resblocks[0].mlp.c_fc.weight.device
|
325 |
+
|
326 |
+
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
|
327 |
+
for r in self.resblocks:
|
328 |
+
x = r(x, attn_mask=attn_mask)
|
329 |
+
return x
|
330 |
+
|
331 |
+
|
332 |
+
class VisionTransformer(nn.Module):
|
333 |
+
|
334 |
+
def __init__(
|
335 |
+
self,
|
336 |
+
image_size: int,
|
337 |
+
patch_size: int,
|
338 |
+
width: int,
|
339 |
+
layers: int,
|
340 |
+
heads: int,
|
341 |
+
mlp_ratio: float,
|
342 |
+
n_queries: int = 256,
|
343 |
+
output_dim: int = 512,
|
344 |
+
**kwargs
|
345 |
+
):
|
346 |
+
super().__init__()
|
347 |
+
image_height, image_width = self.image_size = (image_size, image_size)
|
348 |
+
patch_height, patch_width = self.patch_size = (patch_size, patch_size)
|
349 |
+
self.grid_size = (image_height // patch_height, image_width // patch_width)
|
350 |
+
self.output_dim = output_dim
|
351 |
+
|
352 |
+
mean = (0.48145466, 0.4578275, 0.40821073)
|
353 |
+
std = (0.26862954, 0.26130258, 0.27577711)
|
354 |
+
self.image_transform = transforms.Compose([
|
355 |
+
transforms.Resize(
|
356 |
+
(image_size, image_size),
|
357 |
+
interpolation=InterpolationMode.BICUBIC
|
358 |
+
),
|
359 |
+
transforms.ToTensor(),
|
360 |
+
transforms.Normalize(mean=mean, std=std),
|
361 |
+
])
|
362 |
+
|
363 |
+
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
|
364 |
+
|
365 |
+
# class embeddings and positional embeddings
|
366 |
+
scale = width ** -0.5
|
367 |
+
self.positional_embedding = nn.Parameter(scale * torch.randn(256, width))
|
368 |
+
|
369 |
+
norm_layer = partial(nn.LayerNorm, eps=1e-6)
|
370 |
+
act_layer = nn.GELU
|
371 |
+
|
372 |
+
self.ln_pre = norm_layer(width)
|
373 |
+
self.transformer = TransformerBlock(
|
374 |
+
width,
|
375 |
+
layers,
|
376 |
+
heads,
|
377 |
+
mlp_ratio,
|
378 |
+
act_layer=act_layer,
|
379 |
+
norm_layer=norm_layer,
|
380 |
+
)
|
381 |
+
|
382 |
+
self.attn_pool = Resampler(
|
383 |
+
grid_size=int(math.sqrt(n_queries)),
|
384 |
+
embed_dim=output_dim,
|
385 |
+
num_heads=output_dim // 128,
|
386 |
+
kv_dim=width,
|
387 |
+
norm_layer=norm_layer,
|
388 |
+
)
|
389 |
+
self.ln_post = norm_layer(output_dim)
|
390 |
+
self.proj = nn.Parameter((output_dim** -0.5) * torch.randn(output_dim, output_dim))
|
391 |
+
|
392 |
+
def forward(self, x: torch.Tensor):
|
393 |
+
x = x.to(
|
394 |
+
dtype=self.transformer.get_cast_dtype(),
|
395 |
+
device=self.transformer.get_cast_device(),
|
396 |
+
)
|
397 |
+
# to patches
|
398 |
+
x = self.conv1(x) # shape = [*, width, grid, grid]
|
399 |
+
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
|
400 |
+
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
|
401 |
+
|
402 |
+
x = x + get_abs_pos(self.positional_embedding, x.size(1))
|
403 |
+
|
404 |
+
x = self.ln_pre(x)
|
405 |
+
|
406 |
+
x = x.permute(1, 0, 2) # NLD -> LND
|
407 |
+
x = self.transformer(x)
|
408 |
+
x = x.permute(1, 0, 2) # LND -> NLD
|
409 |
+
|
410 |
+
x = self.attn_pool(x)
|
411 |
+
x = self.ln_post(x)
|
412 |
+
x = x @ self.proj
|
413 |
+
|
414 |
+
return x
|
415 |
+
|
416 |
+
def encode(self, image_paths: List[str]):
|
417 |
+
images = []
|
418 |
+
for image_path in image_paths:
|
419 |
+
if image_path.startswith("http://") or image_path.startswith("https://"):
|
420 |
+
image = Image.open(requests.get(image_path, stream=True).raw)
|
421 |
+
else:
|
422 |
+
image = Image.open(image_path)
|
423 |
+
image = image.convert("RGB")
|
424 |
+
images.append(self.image_transform(image))
|
425 |
+
images = torch.stack(images, dim=0)
|
426 |
+
return self(images)
|
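As a small illustration of the resampling idea implemented above (a fixed set of learned queries cross-attending to an arbitrary number of patch features), here is a toy-sized sketch; the dimensions are illustrative, not the real Qwen-VL sizes, and `visual.py` from this folder is assumed to be importable:

```python
import torch
from visual import Resampler  # assumes this file is on the Python path

# Toy sizes: compress 1024 patch features of width 64 into 256 learned
# query tokens of width 128 via a single cross-attention layer.
resampler = Resampler(grid_size=16, embed_dim=128, num_heads=8, kv_dim=64)
patches = torch.randn(2, 1024, 64)   # (batch, num_patches, kv_dim)
out = resampler(patches)
print(out.shape)                     # torch.Size([2, 256, 128])
```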
QwenViT/qwen_vit_G.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:398d0e75f09d958acdda01b8d685dbd0974abd4f4d404d0a0d9b638799d4d136
size 3871440720
SEED-X-17B/README.md
ADDED
@@ -0,0 +1,105 @@
---
license: other
license_name: license-seed-x-17b
license_link: LICENSE
---

# SEED-X
[arXiv](https://arxiv.org/abs/2404.14396) [Online Gradio Demo](https://139a5c1d085953f17b.gradio.live/)

We introduce SEED-X, a unified and versatile foundation model, which can serve as various multimodal AI assistants **in the real world** after different instruction tuning, capable of responding to a variety of user needs through unifying **multi-granularity comprehension and generation**.

All models and inference code are released!

## News
**2024-04-22** :hugs: We release the [models](https://huggingface.co/AILab-CVC/SEED-X-17B) including the pre-trained foundation model **SEED-X**, the general instruction-tuned model **SEED-X-I**, the editing model **SEED-X-Edit**, and our de-tokenizer, which can generate realistic images from ViT features (w/o or w/ a condition image).

**2024-04-22** :hugs: We release an online [gradio demo](https://139a5c1d085953f17b.gradio.live/) of the general instruction-tuned model SEED-X-I. SEED-X-I can follow multimodal instructions (including images with dynamic resolutions) and respond with images, text and bounding boxes in multi-turn conversation. SEED-X-I **does not support image manipulation**. If you want to experience SEED-X-Edit for high-precision image editing, the inference code and model will be released soon.

## TODOs
- [x] Release the multimodal foundation model SEED-X.
- [x] Release the instruction-tuned model SEED-X-Edit for high-precision image editing.
- [x] Release 3.7M in-house image editing data.





## Usage

### Dependencies
- Python >= 3.8 (we recommend [Anaconda](https://www.anaconda.com/download/#linux))
- [PyTorch >=2.0.1](https://pytorch.org/)
- NVIDIA GPU + [CUDA](https://developer.nvidia.com/cuda-downloads)

### Installation
Clone the repo and install the required packages:

```bash
git clone https://github.com/AILab-CVC/SEED-X.git
cd SEED-X
pip install -r requirements.txt
```

### Model Weights
We release the pretrained De-Tokenizer, the pre-trained foundation model **SEED-X**, the general instruction-tuned model **SEED-X-I**, and the editing model **SEED-X-Edit** in [SEED-X-17B Hugging Face](https://huggingface.co/AILab-CVC/SEED-X-17B).

You can also download them separately as below:
- Check the SEED-X de-tokenizer weights in [AILab-CVC/seed-x-17b-de-tokenizer](https://huggingface.co/AILab-CVC/seed-x-17b-de-tokenizer)
- Check the pre-trained foundation model **SEED-X** weights in [AILab-CVC/seed-x-17b-pretrain](https://huggingface.co/AILab-CVC/seed-x-17b-pretrain)
- Check the general instruction-tuned model **SEED-X-I** weights in [AILab-CVC/seed-x-17b-instruct](https://huggingface.co/AILab-CVC/seed-x-17b-instruct)
- Check the editing model **SEED-X-Edit** weights in [AILab-CVC/seed-x-17b-edit](https://huggingface.co/AILab-CVC/seed-x-17b-edit)

Please download the checkpoints and save them under the folder `./pretrained`. For example, `./pretrained/seed_x`.

You also need to download [stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and [Qwen-VL-Chat](https://huggingface.co/Qwen/Qwen-VL-Chat), and save them under the folder `./pretrained`. Please use the following script to extract the weights of the visual encoder in Qwen-VL-Chat:
```bash
python3 src/tools/reload_qwen_vit.py
```
### Inference with SEED-X De-tokenizer
```bash
# For image reconstruction with ViT image features
python3 src/inference/eval_seed_x_detokenizer.py
# For image reconstruction with ViT image features and conditional image
python3 src/inference/eval_seed_x_detokenizer_with_condition.py
```

### Inference with the pre-trained model SEED-X
```bash
# For image comprehension and detection
python3 src/inference/eval_img2text_seed_x.py
# For image generation
python3 src/inference/eval_text2img_seed_x.py
```

### Inference with the general instruction-tuned model SEED-X-I
```bash
# For image comprehension and detection
python3 src/inference/eval_img2text_seed_x_i.py
# For image generation
python3 src/inference/eval_text2img_seed_x_i.py
```

### Inference with the editing model SEED-X-Edit
```bash
# For image editing
python3 src/inference/eval_img2edit_seed_x_edit.py
```

## Citation
If you find the work helpful, please consider citing:
```bash
@article{ge2024seed,
  title={SEED-X: Multimodal Models with Unified Multi-granularity Comprehension and Generation},
  author={Ge, Yuying and Zhao, Sijie and Zhu, Jinguo and Ge, Yixiao and Yi, Kun and Song, Lin and Li, Chen and Ding, Xiaohan and Shan, Ying},
  journal={arXiv preprint arXiv:2404.14396},
  year={2024}
}
```


## License
`SEED` is licensed under the Apache License Version 2.0 except for the third-party components listed in [License](License_Seed-X.txt).

During training SEED-X, we freeze the original parameters of LLaMA2 and optimize the LoRA module.
cvlm_llama2_tokenizer_100img_and_224loc_addpatch/added_tokens.json
ADDED
@@ -0,0 +1,332 @@
1 |
+
{
|
2 |
+
"</img>": 32101,
|
3 |
+
"</patch>": 32103,
|
4 |
+
"<box_end>": 32329,
|
5 |
+
"<box_start>": 32328,
|
6 |
+
"<img>": 32100,
|
7 |
+
"<img_00000>": 32000,
|
8 |
+
"<img_00001>": 32001,
|
9 |
+
"<img_00002>": 32002,
|
10 |
+
"<img_00003>": 32003,
|
11 |
+
"<img_00004>": 32004,
|
12 |
+
"<img_00005>": 32005,
|
13 |
+
"<img_00006>": 32006,
|
14 |
+
"<img_00007>": 32007,
|
15 |
+
"<img_00008>": 32008,
|
16 |
+
"<img_00009>": 32009,
|
17 |
+
"<img_00010>": 32010,
|
18 |
+
"<img_00011>": 32011,
|
19 |
+
"<img_00012>": 32012,
|
20 |
+
"<img_00013>": 32013,
|
21 |
+
"<img_00014>": 32014,
|
22 |
+
"<img_00015>": 32015,
|
23 |
+
"<img_00016>": 32016,
|
24 |
+
"<img_00017>": 32017,
|
25 |
+
"<img_00018>": 32018,
|
26 |
+
"<img_00019>": 32019,
|
27 |
+
"<img_00020>": 32020,
|
28 |
+
"<img_00021>": 32021,
|
29 |
+
"<img_00022>": 32022,
|
30 |
+
"<img_00023>": 32023,
|
31 |
+
"<img_00024>": 32024,
|
32 |
+
"<img_00025>": 32025,
|
33 |
+
"<img_00026>": 32026,
|
34 |
+
"<img_00027>": 32027,
|
35 |
+
"<img_00028>": 32028,
|
36 |
+
"<img_00029>": 32029,
|
37 |
+
"<img_00030>": 32030,
|
38 |
+
"<img_00031>": 32031,
|
39 |
+
"<img_00032>": 32032,
|
40 |
+
"<img_00033>": 32033,
|
41 |
+
"<img_00034>": 32034,
|
42 |
+
"<img_00035>": 32035,
|
43 |
+
"<img_00036>": 32036,
|
44 |
+
"<img_00037>": 32037,
|
45 |
+
"<img_00038>": 32038,
|
46 |
+
"<img_00039>": 32039,
|
47 |
+
"<img_00040>": 32040,
|
48 |
+
"<img_00041>": 32041,
|
49 |
+
"<img_00042>": 32042,
|
50 |
+
"<img_00043>": 32043,
|
51 |
+
"<img_00044>": 32044,
|
52 |
+
"<img_00045>": 32045,
|
53 |
+
"<img_00046>": 32046,
|
54 |
+
"<img_00047>": 32047,
|
55 |
+
"<img_00048>": 32048,
|
56 |
+
"<img_00049>": 32049,
|
57 |
+
"<img_00050>": 32050,
|
58 |
+
"<img_00051>": 32051,
|
59 |
+
"<img_00052>": 32052,
|
60 |
+
"<img_00053>": 32053,
|
61 |
+
"<img_00054>": 32054,
|
62 |
+
"<img_00055>": 32055,
|
63 |
+
"<img_00056>": 32056,
|
64 |
+
"<img_00057>": 32057,
|
65 |
+
"<img_00058>": 32058,
|
66 |
+
"<img_00059>": 32059,
|
67 |
+
"<img_00060>": 32060,
|
68 |
+
"<img_00061>": 32061,
|
69 |
+
"<img_00062>": 32062,
|
70 |
+
"<img_00063>": 32063,
|
71 |
+
"<img_00064>": 32064,
|
72 |
+
"<img_00065>": 32065,
|
73 |
+
"<img_00066>": 32066,
|
74 |
+
"<img_00067>": 32067,
|
75 |
+
"<img_00068>": 32068,
|
76 |
+
"<img_00069>": 32069,
|
77 |
+
"<img_00070>": 32070,
|
78 |
+
"<img_00071>": 32071,
|
79 |
+
"<img_00072>": 32072,
|
80 |
+
"<img_00073>": 32073,
|
81 |
+
"<img_00074>": 32074,
|
82 |
+
"<img_00075>": 32075,
|
83 |
+
"<img_00076>": 32076,
|
84 |
+
"<img_00077>": 32077,
|
85 |
+
"<img_00078>": 32078,
|
86 |
+
"<img_00079>": 32079,
|
87 |
+
"<img_00080>": 32080,
|
88 |
+
"<img_00081>": 32081,
|
89 |
+
"<img_00082>": 32082,
|
90 |
+
"<img_00083>": 32083,
|
91 |
+
"<img_00084>": 32084,
|
92 |
+
"<img_00085>": 32085,
|
93 |
+
"<img_00086>": 32086,
|
94 |
+
"<img_00087>": 32087,
|
95 |
+
"<img_00088>": 32088,
|
96 |
+
"<img_00089>": 32089,
|
97 |
+
"<img_00090>": 32090,
|
98 |
+
"<img_00091>": 32091,
|
99 |
+
"<img_00092>": 32092,
|
100 |
+
"<img_00093>": 32093,
|
101 |
+
"<img_00094>": 32094,
|
102 |
+
"<img_00095>": 32095,
|
103 |
+
"<img_00096>": 32096,
|
104 |
+
"<img_00097>": 32097,
|
105 |
+
"<img_00098>": 32098,
|
106 |
+
"<img_00099>": 32099,
|
107 |
+
"<loc-0>": 32104,
|
108 |
+
"<loc-100>": 32204,
|
109 |
+
"<loc-101>": 32205,
|
110 |
+
"<loc-102>": 32206,
|
111 |
+
"<loc-103>": 32207,
|
112 |
+
"<loc-104>": 32208,
|
113 |
+
"<loc-105>": 32209,
|
114 |
+
"<loc-106>": 32210,
|
115 |
+
"<loc-107>": 32211,
|
116 |
+
"<loc-108>": 32212,
|
117 |
+
"<loc-109>": 32213,
|
118 |
+
"<loc-10>": 32114,
|
119 |
+
"<loc-110>": 32214,
|
120 |
+
"<loc-111>": 32215,
|
121 |
+
"<loc-112>": 32216,
|
122 |
+
"<loc-113>": 32217,
|
123 |
+
"<loc-114>": 32218,
|
124 |
+
"<loc-115>": 32219,
|
125 |
+
"<loc-116>": 32220,
|
126 |
+
"<loc-117>": 32221,
|
127 |
+
"<loc-118>": 32222,
|
128 |
+
"<loc-119>": 32223,
|
129 |
+
"<loc-11>": 32115,
|
130 |
+
"<loc-120>": 32224,
|
131 |
+
"<loc-121>": 32225,
|
132 |
+
"<loc-122>": 32226,
|
133 |
+
"<loc-123>": 32227,
|
134 |
+
"<loc-124>": 32228,
|
135 |
+
"<loc-125>": 32229,
|
136 |
+
"<loc-126>": 32230,
|
137 |
+
"<loc-127>": 32231,
|
138 |
+
"<loc-128>": 32232,
|
139 |
+
"<loc-129>": 32233,
|
140 |
+
"<loc-12>": 32116,
|
141 |
+
"<loc-130>": 32234,
|
142 |
+
"<loc-131>": 32235,
|
143 |
+
"<loc-132>": 32236,
|
144 |
+
"<loc-133>": 32237,
|
145 |
+
"<loc-134>": 32238,
|
146 |
+
"<loc-135>": 32239,
|
147 |
+
"<loc-136>": 32240,
|
148 |
+
"<loc-137>": 32241,
|
149 |
+
"<loc-138>": 32242,
|
150 |
+
"<loc-139>": 32243,
|
151 |
+
"<loc-13>": 32117,
|
152 |
+
"<loc-140>": 32244,
|
153 |
+
"<loc-141>": 32245,
|
154 |
+
"<loc-142>": 32246,
|
155 |
+
"<loc-143>": 32247,
|
156 |
+
"<loc-144>": 32248,
|
157 |
+
"<loc-145>": 32249,
|
158 |
+
"<loc-146>": 32250,
|
159 |
+
"<loc-147>": 32251,
|
160 |
+
"<loc-148>": 32252,
|
161 |
+
"<loc-149>": 32253,
|
162 |
+
"<loc-14>": 32118,
|
163 |
+
"<loc-150>": 32254,
|
164 |
+
"<loc-151>": 32255,
|
165 |
+
"<loc-152>": 32256,
|
166 |
+
"<loc-153>": 32257,
|
167 |
+
"<loc-154>": 32258,
|
168 |
+
"<loc-155>": 32259,
|
169 |
+
"<loc-156>": 32260,
|
170 |
+
"<loc-157>": 32261,
|
171 |
+
"<loc-158>": 32262,
|
172 |
+
"<loc-159>": 32263,
|
173 |
+
"<loc-15>": 32119,
|
174 |
+
"<loc-160>": 32264,
|
175 |
+
"<loc-161>": 32265,
|
176 |
+
"<loc-162>": 32266,
|
177 |
+
"<loc-163>": 32267,
|
178 |
+
"<loc-164>": 32268,
|
179 |
+
"<loc-165>": 32269,
|
180 |
+
"<loc-166>": 32270,
|
181 |
+
"<loc-167>": 32271,
|
182 |
+
"<loc-168>": 32272,
|
183 |
+
"<loc-169>": 32273,
|
184 |
+
"<loc-16>": 32120,
|
185 |
+
"<loc-170>": 32274,
|
186 |
+
"<loc-171>": 32275,
|
187 |
+
"<loc-172>": 32276,
|
188 |
+
"<loc-173>": 32277,
|
189 |
+
"<loc-174>": 32278,
|
190 |
+
"<loc-175>": 32279,
|
191 |
+
"<loc-176>": 32280,
|
192 |
+
"<loc-177>": 32281,
|
193 |
+
"<loc-178>": 32282,
|
194 |
+
"<loc-179>": 32283,
|
195 |
+
"<loc-17>": 32121,
|
196 |
+
"<loc-180>": 32284,
|
197 |
+
"<loc-181>": 32285,
|
198 |
+
"<loc-182>": 32286,
|
199 |
+
"<loc-183>": 32287,
|
200 |
+
"<loc-184>": 32288,
|
201 |
+
"<loc-185>": 32289,
|
202 |
+
"<loc-186>": 32290,
|
203 |
+
"<loc-187>": 32291,
|
204 |
+
"<loc-188>": 32292,
|
205 |
+
"<loc-189>": 32293,
|
206 |
+
"<loc-18>": 32122,
|
207 |
+
"<loc-190>": 32294,
|
208 |
+
"<loc-191>": 32295,
|
209 |
+
"<loc-192>": 32296,
|
210 |
+
"<loc-193>": 32297,
|
211 |
+
"<loc-194>": 32298,
|
212 |
+
"<loc-195>": 32299,
|
213 |
+
"<loc-196>": 32300,
|
214 |
+
"<loc-197>": 32301,
|
215 |
+
"<loc-198>": 32302,
|
216 |
+
"<loc-199>": 32303,
|
217 |
+
"<loc-19>": 32123,
|
218 |
+
"<loc-1>": 32105,
|
219 |
+
"<loc-200>": 32304,
|
220 |
+
"<loc-201>": 32305,
|
221 |
+
"<loc-202>": 32306,
|
222 |
+
"<loc-203>": 32307,
|
223 |
+
"<loc-204>": 32308,
|
224 |
+
"<loc-205>": 32309,
|
225 |
+
"<loc-206>": 32310,
|
226 |
+
"<loc-207>": 32311,
|
227 |
+
"<loc-208>": 32312,
|
228 |
+
"<loc-209>": 32313,
|
229 |
+
"<loc-20>": 32124,
|
230 |
+
"<loc-210>": 32314,
|
231 |
+
"<loc-211>": 32315,
|
232 |
+
"<loc-212>": 32316,
|
233 |
+
"<loc-213>": 32317,
|
234 |
+
"<loc-214>": 32318,
|
235 |
+
"<loc-215>": 32319,
|
236 |
+
"<loc-216>": 32320,
|
237 |
+
"<loc-217>": 32321,
|
238 |
+
"<loc-218>": 32322,
|
239 |
+
"<loc-219>": 32323,
|
240 |
+
"<loc-21>": 32125,
|
241 |
+
"<loc-220>": 32324,
|
242 |
+
"<loc-221>": 32325,
|
243 |
+
"<loc-222>": 32326,
|
244 |
+
"<loc-223>": 32327,
|
245 |
+
"<loc-22>": 32126,
|
246 |
+
"<loc-23>": 32127,
|
247 |
+
"<loc-24>": 32128,
|
248 |
+
"<loc-25>": 32129,
|
249 |
+
"<loc-26>": 32130,
|
250 |
+
"<loc-27>": 32131,
|
251 |
+
"<loc-28>": 32132,
|
252 |
+
"<loc-29>": 32133,
|
253 |
+
"<loc-2>": 32106,
|
254 |
+
"<loc-30>": 32134,
|
255 |
+
"<loc-31>": 32135,
|
256 |
+
"<loc-32>": 32136,
|
257 |
+
"<loc-33>": 32137,
|
258 |
+
"<loc-34>": 32138,
|
259 |
+
"<loc-35>": 32139,
|
260 |
+
"<loc-36>": 32140,
|
261 |
+
"<loc-37>": 32141,
|
262 |
+
"<loc-38>": 32142,
|
263 |
+
"<loc-39>": 32143,
|
264 |
+
"<loc-3>": 32107,
|
265 |
+
"<loc-40>": 32144,
|
266 |
+
"<loc-41>": 32145,
|
267 |
+
"<loc-42>": 32146,
|
268 |
+
"<loc-43>": 32147,
|
269 |
+
"<loc-44>": 32148,
|
270 |
+
"<loc-45>": 32149,
|
271 |
+
"<loc-46>": 32150,
|
272 |
+
"<loc-47>": 32151,
|
273 |
+
"<loc-48>": 32152,
|
274 |
+
"<loc-49>": 32153,
|
275 |
+
"<loc-4>": 32108,
|
276 |
+
"<loc-50>": 32154,
|
277 |
+
"<loc-51>": 32155,
|
278 |
+
"<loc-52>": 32156,
|
279 |
+
"<loc-53>": 32157,
|
280 |
+
"<loc-54>": 32158,
|
281 |
+
"<loc-55>": 32159,
|
282 |
+
"<loc-56>": 32160,
|
283 |
+
"<loc-57>": 32161,
|
284 |
+
"<loc-58>": 32162,
|
285 |
+
"<loc-59>": 32163,
|
286 |
+
"<loc-5>": 32109,
|
287 |
+
"<loc-60>": 32164,
|
288 |
+
"<loc-61>": 32165,
|
289 |
+
"<loc-62>": 32166,
|
290 |
+
"<loc-63>": 32167,
|
291 |
+
"<loc-64>": 32168,
|
292 |
+
"<loc-65>": 32169,
|
293 |
+
"<loc-66>": 32170,
|
294 |
+
"<loc-67>": 32171,
|
295 |
+
"<loc-68>": 32172,
|
296 |
+
"<loc-69>": 32173,
|
297 |
+
"<loc-6>": 32110,
|
298 |
+
"<loc-70>": 32174,
|
299 |
+
"<loc-71>": 32175,
|
300 |
+
"<loc-72>": 32176,
|
301 |
+
"<loc-73>": 32177,
|
302 |
+
"<loc-74>": 32178,
|
303 |
+
"<loc-75>": 32179,
|
304 |
+
"<loc-76>": 32180,
|
305 |
+
"<loc-77>": 32181,
|
306 |
+
"<loc-78>": 32182,
|
307 |
+
"<loc-79>": 32183,
|
308 |
+
"<loc-7>": 32111,
|
309 |
+
"<loc-80>": 32184,
|
310 |
+
"<loc-81>": 32185,
|
311 |
+
"<loc-82>": 32186,
|
312 |
+
"<loc-83>": 32187,
|
313 |
+
"<loc-84>": 32188,
|
314 |
+
"<loc-85>": 32189,
|
315 |
+
"<loc-86>": 32190,
|
316 |
+
"<loc-87>": 32191,
|
317 |
+
"<loc-88>": 32192,
|
318 |
+
"<loc-89>": 32193,
|
319 |
+
"<loc-8>": 32112,
|
320 |
+
"<loc-90>": 32194,
|
321 |
+
"<loc-91>": 32195,
|
322 |
+
"<loc-92>": 32196,
|
323 |
+
"<loc-93>": 32197,
|
324 |
+
"<loc-94>": 32198,
|
325 |
+
"<loc-95>": 32199,
|
326 |
+
"<loc-96>": 32200,
|
327 |
+
"<loc-97>": 32201,
|
328 |
+
"<loc-98>": 32202,
|
329 |
+
"<loc-99>": 32203,
|
330 |
+
"<loc-9>": 32113,
|
331 |
+
"<patch>": 32102
|
332 |
+
}
|
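The mapping above extends the base LLaMA-2 vocabulary (IDs 0 to 31999) with 330 extra tokens: 100 image tokens (`<img_00000>` through `<img_00099>`), the `<img>`/`</img>` and `<patch>`/`</patch>` delimiters, 224 location bins (`<loc-0>` through `<loc-223>`), and `<box_start>`/`<box_end>`. A small sketch of how they resolve once this tokenizer folder is loaded:

```python
# Sketch: the added tokens resolve to the IDs listed in added_tokens.json.
from transformers import LlamaTokenizer

tok = LlamaTokenizer.from_pretrained("cvlm_llama2_tokenizer_100img_and_224loc_addpatch")
print(tok.convert_tokens_to_ids("<img_00000>"))  # 32000
print(tok.convert_tokens_to_ids("<img>"))        # 32100
print(tok.convert_tokens_to_ids("<loc-223>"))    # 32327
print(tok.convert_tokens_to_ids("<box_end>"))    # 32329
print(len(tok))                                  # 32330
```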
cvlm_llama2_tokenizer_100img_and_224loc_addpatch/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
cvlm_llama2_tokenizer_100img_and_224loc_addpatch/tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
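The three lines above are a Git LFS pointer rather than the SentencePiece model itself; the actual 499723-byte file is materialized when the repository is fetched with LFS support. A hedged sketch of pulling one such file directly, assuming it is hosted in the [AILab-CVC/SEED-X-17B](https://huggingface.co/AILab-CVC/SEED-X-17B) repository referenced in the README:

```python
# Assumption: the file lives in the AILab-CVC/SEED-X-17B repository mentioned above.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="AILab-CVC/SEED-X-17B",
    filename="cvlm_llama2_tokenizer_100img_and_224loc_addpatch/tokenizer.model",
)
print(local_path)  # cached path of the resolved file (size should match the pointer: 499723 bytes)
```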
cvlm_llama2_tokenizer_100img_and_224loc_addpatch/tokenizer_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
seed_detokenizer/first_stage/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a06a56bcfe123b5a01f91fde57999879060cb1155ebfea1b75861d14db7a0c72
+size 2427381181
seed_detokenizer/second_stage/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e631015b687053c3d0bc94e438f73f5e8b311d872d24cd2b02c0601903eb6a0e
+size 10515136357
seed_x/agent/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:defcb3adb638056a2119dea6226ddcaee5407c0aae47be81daa675105ab9cf8b
+size 860741046
seed_x/llm/config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "/chat_sh/share_300719895/user/sijiezhao/model_hub/Llama-2-13b-chat-hf",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 5120,
+  "initializer_range": 0.02,
+  "intermediate_size": 13824,
+  "max_position_embeddings": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 40,
+  "num_hidden_layers": 40,
+  "num_key_value_heads": 40,
+  "pad_token_id": 0,
+  "pretraining_tp": 2,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.30.2",
+  "use_cache": true,
+  "vocab_size": 32330
+}
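The LLM branch is a LLaMA-2-13B-chat configuration (40 layers, hidden size 5120) whose vocabulary has been resized to 32330 to accommodate the 330 added tokens listed earlier. A minimal sketch of inspecting it, assuming the `./pretrained/seed_x` layout suggested in the README:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./pretrained/seed_x/llm")
print(cfg.model_type, cfg.num_hidden_layers, cfg.hidden_size)  # llama 40 5120
print(cfg.vocab_size)  # 32330 = 32000 base tokens + 330 added tokens
```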
seed_x/llm/generation_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0,
+  "temperature": 0.9,
+  "top_p": 0.6,
+  "transformers_version": "4.30.2"
+}
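These generation defaults (temperature 0.9, top-p 0.6) only take effect when sampling is enabled. A short sketch of reading them, with the path again assumed from the README layout:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./pretrained/seed_x/llm")
# Only used when sampling is enabled (do_sample=True) in model.generate(...).
print(gen_cfg.temperature, gen_cfg.top_p)  # 0.9 0.6
```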
seed_x/llm/pytorch_model-00001-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3cae98c7c9cde9d3ca71b89f6b0fa122257402e91655c6a4ab9ebf658c9e1dd
+size 9963302283
seed_x/llm/pytorch_model-00002-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c02c558ce9c5f029ae1cde04c9ce7798da1f875654b7e827a90609436010dd7
+size 9940856385
seed_x/llm/pytorch_model-00003-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62933151997f0c4eabb3fe24d765620a80354033f3459a02a4ede9b278c2b151
+size 9940856943
seed_x/llm/pytorch_model-00004-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e9cc585aba8ec09b374c67cc594d435f8c382aa0bae241236ff782c6d58d991
+size 9867415289
seed_x/llm/pytorch_model-00005-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65a8e17cd863cf356d993d1c59df9a59f95f2e138944290e6c6aac427c1bd9dd
+size 9867456961
seed_x/llm/pytorch_model-00006-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d9345e45a28ad3e13ae1696ee37c141b450b1ebc1044400c2b84d9b503f8f68
+size 2497234607
seed_x/llm/pytorch_model.bin.index.json
ADDED
@@ -0,0 +1,410 @@
1 |
+
{
|
2 |
+
"metadata": {
|
3 |
+
"total_size": 52076984320
|
4 |
+
},
|
5 |
+
"weight_map": {
|
6 |
+
"lm_head.weight": "pytorch_model-00006-of-00006.bin",
|
7 |
+
"model.embed_tokens.weight": "pytorch_model-00001-of-00006.bin",
|
8 |
+
"model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
9 |
+
"model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
|
10 |
+
"model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
|
11 |
+
"model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
|
12 |
+
"model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
13 |
+
"model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
|
14 |
+
"model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
|
15 |
+
"model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
|
16 |
+
"model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00006.bin",
|
17 |
+
"model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
|
18 |
+
"model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
19 |
+
"model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
|
20 |
+
"model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
|
21 |
+
"model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
|
22 |
+
"model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
23 |
+
"model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
|
24 |
+
"model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
|
25 |
+
"model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
|
26 |
+
"model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00006.bin",
|
27 |
+
"model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
|
28 |
+
"model.layers.10.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
29 |
+
"model.layers.10.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
|
30 |
+
"model.layers.10.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
|
31 |
+
"model.layers.10.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
|
32 |
+
"model.layers.10.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
33 |
+
"model.layers.10.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
|
34 |
+
"model.layers.10.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
|
35 |
+
"model.layers.10.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
|
36 |
+
"model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00006.bin",
|
37 |
+
"model.layers.10.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
|
38 |
+
"model.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
39 |
+
"model.layers.11.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
|
40 |
+
"model.layers.11.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
|
41 |
+
"model.layers.11.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
|
42 |
+
"model.layers.11.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
43 |
+
"model.layers.11.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
|
44 |
+
"model.layers.11.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
|
45 |
+
"model.layers.11.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
|
46 |
+
"model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00006.bin",
|
47 |
+
"model.layers.11.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
|
48 |
+
"model.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
49 |
+
"model.layers.12.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
|
50 |
+
"model.layers.12.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
|
51 |
+
"model.layers.12.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
|
52 |
+
"model.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
53 |
+
"model.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
|
54 |
+
"model.layers.12.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
|
55 |
+
"model.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
|
56 |
+
"model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00006.bin",
|
57 |
+
"model.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
|
58 |
+
"model.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
59 |
+
"model.layers.13.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
|
60 |
+
"model.layers.13.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
|
61 |
+
"model.layers.13.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
|
62 |
+
"model.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
63 |
+
"model.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
|
64 |
+
"model.layers.13.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
|
65 |
+
"model.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
|
66 |
+
"model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00006.bin",
|
67 |
+
"model.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
|
68 |
+
"model.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
69 |
+
"model.layers.14.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
|
70 |
+
"model.layers.14.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
|
71 |
+
"model.layers.14.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
|
72 |
+
"model.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
73 |
+
"model.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
|
74 |
+
"model.layers.14.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
|
75 |
+
"model.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
|
76 |
+
"model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00006.bin",
|
77 |
+
"model.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
|
78 |
+
"model.layers.15.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
79 |
+
"model.layers.15.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
|
80 |
+
"model.layers.15.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
|
81 |
+
"model.layers.15.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
|
82 |
+
"model.layers.15.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
83 |
+
"model.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
|
84 |
+
"model.layers.15.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
|
85 |
+
"model.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
|
86 |
+
"model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00006.bin",
|
87 |
+
"model.layers.15.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
|
88 |
+
"model.layers.16.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
89 |
+
"model.layers.16.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
|
90 |
+
"model.layers.16.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
|
91 |
+
"model.layers.16.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
|
92 |
+
"model.layers.16.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
93 |
+
"model.layers.16.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
|
94 |
+
"model.layers.16.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
|
95 |
+
"model.layers.16.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
|
96 |
+
"model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00006.bin",
|
97 |
+
"model.layers.16.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
|
98 |
+
"model.layers.17.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
99 |
+
"model.layers.17.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
|
100 |
+
"model.layers.17.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
|
101 |
+
"model.layers.17.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
|
102 |
+
"model.layers.17.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
103 |
+
"model.layers.17.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
|
104 |
+
"model.layers.17.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
|
105 |
+
"model.layers.17.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
|
106 |
+
"model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00006.bin",
|
107 |
+
"model.layers.17.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
|
108 |
+
"model.layers.18.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
109 |
+
"model.layers.18.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
|
110 |
+
"model.layers.18.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
|
111 |
+
"model.layers.18.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
|
112 |
+
"model.layers.18.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
113 |
+
"model.layers.18.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
|
114 |
+
"model.layers.18.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
|
115 |
+
"model.layers.18.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
|
116 |
+
"model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00006.bin",
|
117 |
+
"model.layers.18.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
|
118 |
+
"model.layers.19.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
119 |
+
"model.layers.19.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
|
120 |
+
"model.layers.19.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
|
121 |
+
"model.layers.19.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
|
122 |
+
"model.layers.19.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
123 |
+
"model.layers.19.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
|
124 |
+
"model.layers.19.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
|
125 |
+
"model.layers.19.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
|
126 |
+
"model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00006.bin",
|
127 |
+
"model.layers.19.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
|
128 |
+
"model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
129 |
+
"model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
|
130 |
+
"model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
|
131 |
+
"model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
|
132 |
+
"model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
133 |
+
"model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
|
134 |
+
"model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
|
135 |
+
"model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
|
136 |
+
"model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00006.bin",
|
137 |
+
"model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
|
138 |
+
"model.layers.20.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
139 |
+
"model.layers.20.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
|
140 |
+
"model.layers.20.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
|
141 |
+
"model.layers.20.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
|
142 |
+
"model.layers.20.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
143 |
+
"model.layers.20.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
|
144 |
+
"model.layers.20.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
|
145 |
+
"model.layers.20.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
|
146 |
+
"model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00006.bin",
|
147 |
+
"model.layers.20.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
|
148 |
+
"model.layers.21.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
149 |
+
"model.layers.21.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
|
150 |
+
"model.layers.21.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
|
151 |
+
"model.layers.21.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
|
152 |
+
"model.layers.21.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
153 |
+
"model.layers.21.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
|
154 |
+
"model.layers.21.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
|
155 |
+
"model.layers.21.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
|
156 |
+
"model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00006.bin",
|
157 |
+
"model.layers.21.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
|
158 |
+
"model.layers.22.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
159 |
+
"model.layers.22.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
|
160 |
+
"model.layers.22.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
|
161 |
+
"model.layers.22.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
|
162 |
+
"model.layers.22.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
163 |
+
"model.layers.22.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
|
164 |
+
"model.layers.22.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
|
165 |
+
"model.layers.22.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
|
166 |
+
"model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00006.bin",
|
167 |
+
"model.layers.22.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
|
168 |
+
"model.layers.23.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
169 |
+
"model.layers.23.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
|
170 |
+
"model.layers.23.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
|
171 |
+
"model.layers.23.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
|
172 |
+
"model.layers.23.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
173 |
+
"model.layers.23.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
|
174 |
+
"model.layers.23.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
|
175 |
+
"model.layers.23.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
|
176 |
+
"model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00006.bin",
|
177 |
+
"model.layers.23.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
|
178 |
+
"model.layers.24.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
179 |
+
"model.layers.24.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
|
180 |
+
"model.layers.24.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
|
181 |
+
"model.layers.24.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
|
182 |
+
"model.layers.24.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
183 |
+
"model.layers.24.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
|
184 |
+
"model.layers.24.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
|
185 |
+
"model.layers.24.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
|
186 |
+
"model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00006.bin",
|
187 |
+
"model.layers.24.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
|
188 |
+
"model.layers.25.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
189 |
+
"model.layers.25.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
|
190 |
+
"model.layers.25.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
|
191 |
+
"model.layers.25.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
|
192 |
+
"model.layers.25.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
193 |
+
"model.layers.25.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
|
194 |
+
"model.layers.25.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
|
195 |
+
"model.layers.25.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
|
196 |
+
"model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00006.bin",
|
197 |
+
"model.layers.25.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
|
198 |
+
"model.layers.26.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
199 |
+
"model.layers.26.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
|
200 |
+
"model.layers.26.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
|
201 |
+
"model.layers.26.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
|
202 |
+
"model.layers.26.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
203 |
+
"model.layers.26.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
|
204 |
+
"model.layers.26.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
|
205 |
+
"model.layers.26.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
|
206 |
+
"model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00006.bin",
|
207 |
+
"model.layers.26.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
|
208 |
+
"model.layers.27.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
209 |
+
"model.layers.27.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
|
210 |
+
"model.layers.27.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
|
211 |
+
"model.layers.27.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
|
212 |
+
"model.layers.27.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
213 |
+
"model.layers.27.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
|
214 |
+
"model.layers.27.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
|
215 |
+
"model.layers.27.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
|
216 |
+
"model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00006.bin",
|
217 |
+
"model.layers.27.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
|
218 |
+
"model.layers.28.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
219 |
+
"model.layers.28.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
|
220 |
+
"model.layers.28.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
|
221 |
+
"model.layers.28.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
|
222 |
+
"model.layers.28.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
223 |
+
"model.layers.28.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
|
224 |
+
"model.layers.28.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
|
225 |
+
"model.layers.28.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
|
226 |
+
"model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00006.bin",
|
227 |
+
"model.layers.28.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
|
228 |
+
"model.layers.29.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
229 |
+
"model.layers.29.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
|
230 |
+
"model.layers.29.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
|
231 |
+
"model.layers.29.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
|
232 |
+
"model.layers.29.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
|
233 |
+
"model.layers.29.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
|
234 |
+
"model.layers.29.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
|
235 |
+
"model.layers.29.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
|
236 |
+
"model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00006.bin",
|
237 |
+
"model.layers.29.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
|
238 |
+
"model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
239 |
+
"model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
|
240 |
+
"model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
|
241 |
+
"model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
|
242 |
+
"model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
243 |
+
"model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
|
244 |
+
"model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
|
245 |
+
"model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
|
246 |
+
"model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00006.bin",
|
247 |
+
"model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
|
248 |
+
"model.layers.30.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
249 |
+
"model.layers.30.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
|
250 |
+
"model.layers.30.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
|
251 |
+
"model.layers.30.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
|
252 |
+
"model.layers.30.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
253 |
+
"model.layers.30.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
|
254 |
+
"model.layers.30.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
|
255 |
+
"model.layers.30.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
|
256 |
+
"model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00006.bin",
|
257 |
+
"model.layers.30.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
|
258 |
+
"model.layers.31.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
259 |
+
"model.layers.31.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
|
260 |
+
"model.layers.31.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
|
261 |
+
"model.layers.31.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
|
262 |
+
"model.layers.31.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
263 |
+
"model.layers.31.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
|
264 |
+
"model.layers.31.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
|
265 |
+
"model.layers.31.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
|
266 |
+
"model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00006.bin",
|
267 |
+
"model.layers.31.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
|
268 |
+
"model.layers.32.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
269 |
+
"model.layers.32.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
|
270 |
+
"model.layers.32.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
|
271 |
+
"model.layers.32.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
|
272 |
+
"model.layers.32.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
273 |
+
"model.layers.32.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
|
274 |
+
"model.layers.32.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
|
275 |
+
"model.layers.32.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
|
276 |
+
"model.layers.32.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00006.bin",
|
277 |
+
"model.layers.32.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
|
278 |
+
"model.layers.33.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
279 |
+
"model.layers.33.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
|
280 |
+
"model.layers.33.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
|
281 |
+
"model.layers.33.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
|
282 |
+
"model.layers.33.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
283 |
+
"model.layers.33.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
|
284 |
+
"model.layers.33.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
|
285 |
+
"model.layers.33.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
|
286 |
+
"model.layers.33.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00006.bin",
|
287 |
+
"model.layers.33.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
|
288 |
+
"model.layers.34.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
289 |
+
"model.layers.34.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
|
290 |
+
"model.layers.34.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
|
291 |
+
"model.layers.34.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
|
292 |
+
"model.layers.34.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
293 |
+
"model.layers.34.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
|
294 |
+
"model.layers.34.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
|
295 |
+
"model.layers.34.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
|
296 |
+
"model.layers.34.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00006.bin",
|
297 |
+
"model.layers.34.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
|
298 |
+
"model.layers.35.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
299 |
+
"model.layers.35.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
|
300 |
+
"model.layers.35.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
|
301 |
+
"model.layers.35.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
|
302 |
+
"model.layers.35.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
303 |
+
"model.layers.35.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
|
304 |
+
"model.layers.35.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
|
305 |
+
"model.layers.35.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
|
306 |
+
"model.layers.35.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00006.bin",
|
307 |
+
"model.layers.35.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
|
308 |
+
"model.layers.36.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
309 |
+
"model.layers.36.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
|
310 |
+
"model.layers.36.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
|
311 |
+
"model.layers.36.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
|
312 |
+
"model.layers.36.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
313 |
+
"model.layers.36.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
|
314 |
+
"model.layers.36.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
|
315 |
+
"model.layers.36.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
|
316 |
+
"model.layers.36.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00006.bin",
|
317 |
+
"model.layers.36.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
|
318 |
+
"model.layers.37.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
319 |
+
"model.layers.37.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
|
320 |
+
"model.layers.37.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
|
321 |
+
"model.layers.37.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
|
322 |
+
"model.layers.37.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
|
323 |
+
"model.layers.37.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
|
324 |
+
"model.layers.37.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
|
325 |
+
"model.layers.37.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
|
326 |
+
"model.layers.37.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00006.bin",
|
327 |
+
"model.layers.37.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
|
328 |
+
"model.layers.38.input_layernorm.weight": "pytorch_model-00006-of-00006.bin",
|
329 |
+
"model.layers.38.mlp.down_proj.weight": "pytorch_model-00006-of-00006.bin",
|
330 |
+
"model.layers.38.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
|
331 |
+
"model.layers.38.mlp.up_proj.weight": "pytorch_model-00006-of-00006.bin",
|
332 |
+
"model.layers.38.post_attention_layernorm.weight": "pytorch_model-00006-of-00006.bin",
|
333 |
+
"model.layers.38.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
|
334 |
+
"model.layers.38.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
|
335 |
+
"model.layers.38.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
|
336 |
+
"model.layers.38.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00006.bin",
|
337 |
+
"model.layers.38.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
|
338 |
+
"model.layers.39.input_layernorm.weight": "pytorch_model-00006-of-00006.bin",
|
339 |
+
"model.layers.39.mlp.down_proj.weight": "pytorch_model-00006-of-00006.bin",
|
340 |
+
"model.layers.39.mlp.gate_proj.weight": "pytorch_model-00006-of-00006.bin",
|
341 |
+
"model.layers.39.mlp.up_proj.weight": "pytorch_model-00006-of-00006.bin",
|
342 |
+
"model.layers.39.post_attention_layernorm.weight": "pytorch_model-00006-of-00006.bin",
|
343 |
+
"model.layers.39.self_attn.k_proj.weight": "pytorch_model-00006-of-00006.bin",
|
344 |
+
"model.layers.39.self_attn.o_proj.weight": "pytorch_model-00006-of-00006.bin",
|
345 |
+
"model.layers.39.self_attn.q_proj.weight": "pytorch_model-00006-of-00006.bin",
|
346 |
+
"model.layers.39.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00006.bin",
|
347 |
+
"model.layers.39.self_attn.v_proj.weight": "pytorch_model-00006-of-00006.bin",
|
348 |
+
"model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
349 |
+
"model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
|
350 |
+
"model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
|
351 |
+
"model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
|
352 |
+
"model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
353 |
+
"model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
|
354 |
+
"model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
|
355 |
+
"model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
|
356 |
+
"model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00006.bin",
|
357 |
+
"model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
|
358 |
+
"model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
359 |
+
"model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
|
360 |
+
"model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
|
361 |
+
"model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
|
362 |
+
"model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
363 |
+
"model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
|
364 |
+
"model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
|
365 |
+
"model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
|
366 |
+
"model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00006.bin",
|
367 |
+
"model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
|
368 |
+
"model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
369 |
+
"model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
|
370 |
+
"model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
|
371 |
+
"model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
|
372 |
+
"model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
|
373 |
+
"model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
|
374 |
+
"model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
|
375 |
+
"model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
|
376 |
+
"model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00006.bin",
|
377 |
+
"model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
|
378 |
+
"model.layers.7.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
379 |
+
"model.layers.7.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
|
380 |
+
"model.layers.7.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
|
381 |
+
"model.layers.7.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
|
382 |
+
"model.layers.7.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
383 |
+
"model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
|
384 |
+
"model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
|
385 |
+
"model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
|
386 |
+
"model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00006.bin",
|
387 |
+
"model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
|
388 |
+
"model.layers.8.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
389 |
+
"model.layers.8.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
|
390 |
+
"model.layers.8.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
|
391 |
+
"model.layers.8.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
|
392 |
+
"model.layers.8.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
393 |
+
"model.layers.8.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
|
394 |
+
"model.layers.8.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
|
395 |
+
"model.layers.8.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
|
396 |
+
"model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00006.bin",
|
397 |
+
"model.layers.8.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
|
398 |
+
"model.layers.9.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
399 |
+
"model.layers.9.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
|
400 |
+
"model.layers.9.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
|
401 |
+
"model.layers.9.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
|
402 |
+
"model.layers.9.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
|
403 |
+
"model.layers.9.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
|
404 |
+
"model.layers.9.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
|
405 |
+
"model.layers.9.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
|
406 |
+
"model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00006.bin",
|
407 |
+
"model.layers.9.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
|
408 |
+
"model.norm.weight": "pytorch_model-00006-of-00006.bin"
|
409 |
+
}
|
410 |
+
}
|
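The `weight_map` above is what `from_pretrained` consults to decide which of the six shards (52,076,984,320 bytes in total) holds each parameter. A minimal sketch of resolving a single tensor by hand from the index:

```python
import json
import torch

with open("seed_x/llm/pytorch_model.bin.index.json") as f:
    index = json.load(f)

name = "model.layers.0.mlp.down_proj.weight"
shard = index["weight_map"][name]            # "pytorch_model-00001-of-00006.bin"
state = torch.load(f"seed_x/llm/{shard}", map_location="cpu")
print(name, tuple(state[name].shape))
```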
seed_x_edit/agent/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a13dec9f7d43142ecfd2642f74cbf633f046ac6c7c5836c95e247dabb19ebd62
+size 860741046
seed_x_edit/llm/config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "/chat_sh/share_300719895/user/yuyingge/jinguo_code/DiscreteLearning_debug/train_output/04_08_seedx_llama13b_vitg_448_anyres_pretrain/checkpoint-23000-merged/llm",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 5120,
+  "initializer_range": 0.02,
+  "intermediate_size": 13824,
+  "max_position_embeddings": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 40,
+  "num_hidden_layers": 40,
+  "num_key_value_heads": 40,
+  "pad_token_id": 0,
+  "pretraining_tp": 2,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.30.2",
+  "use_cache": true,
+  "vocab_size": 32330
+}
seed_x_edit/llm/generation_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0,
+  "temperature": 0.9,
+  "top_p": 0.6,
+  "transformers_version": "4.30.2"
+}
seed_x_edit/llm/pytorch_model-00001-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c264d2e8af73c29b05775089eb7cd2cea2725c90f31dd39672579e47a245e028
+size 9963302283
seed_x_edit/llm/pytorch_model-00002-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69d271bdd52398a44f24d76c664d4c60835f23defebdb72b5cc1f146dc90fc72
+size 9940856385
seed_x_edit/llm/pytorch_model-00003-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03e0d6671e3454d4b2e04c8913c1c1007ef3e6ffa9a516ed178c51ead0d0b8a6
+size 9940856943
seed_x_edit/llm/pytorch_model-00004-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2eeac1037d3318fb095d95ff963c4f3277bd714b11d6b3a23962b354b7e7d335
+size 9867415289