add AIBOM
#20
by fatima113 · opened
Qwen_Qwen2.5-7B-Instruct.json
ADDED
@@ -0,0 +1,69 @@
+{
+  "bomFormat": "CycloneDX",
+  "specVersion": "1.6",
+  "serialNumber": "urn:uuid:4ca2b6f8-6145-4d53-ae76-dc5fc7f694bb",
+  "version": 1,
+  "metadata": {
+    "timestamp": "2025-06-05T09:37:31.466856+00:00",
+    "component": {
+      "type": "machine-learning-model",
+      "bom-ref": "Qwen/Qwen2.5-7B-Instruct-0aa1dcca-35d6-540c-bb64-eb00402ee564",
+      "name": "Qwen/Qwen2.5-7B-Instruct",
+      "externalReferences": [
+        {
+          "url": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct",
+          "type": "documentation"
+        }
+      ],
+      "modelCard": {
+        "modelParameters": {
+          "task": "text-generation",
+          "architectureFamily": "qwen2",
+          "modelArchitecture": "Qwen2ForCausalLM"
+        },
+        "properties": [
+          {
+            "name": "library_name",
+            "value": "transformers"
+          },
+          {
+            "name": "base_model",
+            "value": "Qwen/Qwen2.5-7B"
+          }
+        ]
+      },
+      "authors": [
+        {
+          "name": "Qwen"
+        }
+      ],
+      "licenses": [
+        {
+          "license": {
+            "id": "Apache-2.0",
+            "url": "https://spdx.org/licenses/Apache-2.0.html"
+          }
+        }
+      ],
+      "description": "Qwen2.5 is the latest series of Qwen large language models. For Qwen2.5, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters. Qwen2.5 brings the following improvements upon Qwen2:- Significantly **more knowledge** and has greatly improved capabilities in **coding** and **mathematics**, thanks to our specialized expert models in these domains.- Significant improvements in **instruction following**, **generating long texts** (over 8K tokens), **understanding structured data** (e.g, tables), and **generating structured outputs** especially JSON. **More resilient to the diversity of system prompts**, enhancing role-play implementation and condition-setting for chatbots.- **Long-context Support** up to 128K tokens and can generate up to 8K tokens.- **Multilingual support** for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.**This repo contains the instruction-tuned 7B Qwen2.5 model**, which has the following features:- Type: Causal Language Models- Training Stage: Pretraining & Post-training- Architecture: transformers with RoPE, SwiGLU, RMSNorm, and Attention QKV bias- Number of Parameters: 7.61B- Number of Paramaters (Non-Embedding): 6.53B- Number of Layers: 28- Number of Attention Heads (GQA): 28 for Q and 4 for KV- Context Length: Full 131,072 tokens and generation 8192 tokens- Please refer to [this section](#processing-long-texts) for detailed instructions on how to deploy Qwen2.5 for handling long texts.For more details, please refer to our [blog](https://qwenlm.github.io/blog/qwen2.5/), [GitHub](https://github.com/QwenLM/Qwen2.5), and [Documentation](https://qwen.readthedocs.io/en/latest/).",
+      "tags": [
+        "transformers",
+        "safetensors",
+        "qwen2",
+        "text-generation",
+        "chat",
+        "conversational",
+        "en",
+        "arxiv:2309.00071",
+        "arxiv:2407.10671",
+        "base_model:Qwen/Qwen2.5-7B",
+        "base_model:finetune:Qwen/Qwen2.5-7B",
+        "license:apache-2.0",
+        "autotrain_compatible",
+        "text-generation-inference",
+        "endpoints_compatible",
+        "region:us"
+      ]
+    }
+  }
+}
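For reviewers: a minimal sketch of how a downstream consumer might read this AIBOM and pull out the fields that matter for inventory and license tracking. It uses only the Python standard library and assumes the file is saved under the name added in this PR; the field paths follow the CycloneDX 1.6 structure shown above.

```python
# Minimal sketch: parse the AIBOM added in this PR with the stdlib only.
# Assumes the file is saved as Qwen_Qwen2.5-7B-Instruct.json (as in this PR).
import json

with open("Qwen_Qwen2.5-7B-Instruct.json") as f:
    bom = json.load(f)

# Sanity-check the document type before trusting the rest of it.
assert bom["bomFormat"] == "CycloneDX"
assert bom["specVersion"] == "1.6"

component = bom["metadata"]["component"]
print("model:", component["name"])  # Qwen/Qwen2.5-7B-Instruct
print("task:", component["modelCard"]["modelParameters"]["task"])  # text-generation

# In CycloneDX, licenses is a list of {"license": {...}} wrappers.
for entry in component["licenses"]:
    print("license:", entry["license"]["id"])  # Apache-2.0

# modelCard.properties is a flat name/value list; index it for lookups.
props = {p["name"]: p["value"] for p in component["modelCard"]["properties"]}
print("base model:", props.get("base_model"))  # Qwen/Qwen2.5-7B
```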