dpquoc committed
Commit 2f3418d · verified · 1 Parent(s): e801b10

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +10 -0
  2. checkpoint-1080/README.md +202 -0
  3. checkpoint-1080/adapter_config.json +38 -0
  4. checkpoint-1080/adapter_model.safetensors +3 -0
  5. checkpoint-1080/global_step1080/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  6. checkpoint-1080/global_step1080/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  7. checkpoint-1080/global_step1080/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  8. checkpoint-1080/global_step1080/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  9. checkpoint-1080/global_step1080/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
  10. checkpoint-1080/global_step1080/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
  11. checkpoint-1080/global_step1080/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
  12. checkpoint-1080/global_step1080/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
  13. checkpoint-1080/latest +1 -0
  14. checkpoint-1080/rng_state_0.pth +3 -0
  15. checkpoint-1080/rng_state_1.pth +3 -0
  16. checkpoint-1080/rng_state_2.pth +3 -0
  17. checkpoint-1080/rng_state_3.pth +3 -0
  18. checkpoint-1080/scheduler.pt +3 -0
  19. checkpoint-1080/special_tokens_map.json +30 -0
  20. checkpoint-1080/tokenizer.json +3 -0
  21. checkpoint-1080/tokenizer_config.json +364 -0
  22. checkpoint-1080/trainer_state.json +3869 -0
  23. checkpoint-1080/training_args.bin +3 -0
  24. checkpoint-1080/zero_to_fp32.py +760 -0
  25. checkpoint-1170/README.md +202 -0
  26. checkpoint-1170/adapter_config.json +38 -0
  27. checkpoint-1170/adapter_model.safetensors +3 -0
  28. checkpoint-1170/global_step1170/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  29. checkpoint-1170/global_step1170/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  30. checkpoint-1170/global_step1170/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  31. checkpoint-1170/global_step1170/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  32. checkpoint-1170/global_step1170/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
  33. checkpoint-1170/global_step1170/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
  34. checkpoint-1170/global_step1170/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
  35. checkpoint-1170/global_step1170/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
  36. checkpoint-1170/latest +1 -0
  37. checkpoint-1170/rng_state_0.pth +3 -0
  38. checkpoint-1170/rng_state_1.pth +3 -0
  39. checkpoint-1170/rng_state_2.pth +3 -0
  40. checkpoint-1170/rng_state_3.pth +3 -0
  41. checkpoint-1170/scheduler.pt +3 -0
  42. checkpoint-1170/special_tokens_map.json +30 -0
  43. checkpoint-1170/tokenizer.json +3 -0
  44. checkpoint-1170/tokenizer_config.json +364 -0
  45. checkpoint-1170/trainer_state.json +0 -0
  46. checkpoint-1170/training_args.bin +3 -0
  47. checkpoint-1170/zero_to_fp32.py +760 -0
  48. checkpoint-1260/README.md +202 -0
  49. checkpoint-1260/adapter_config.json +38 -0
  50. checkpoint-1260/adapter_model.safetensors +3 -0
.gitattributes CHANGED
@@ -39,3 +39,13 @@ checkpoint-360/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  checkpoint-450/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  checkpoint-540/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  checkpoint-90/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1080/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1170/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1260/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1350/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1440/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-630/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-720/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-810/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-990/tokenizer.json filter=lfs diff=lfs merge=lfs -text
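
These rules route each new checkpoint's `tokenizer.json` (about 20 MB each, per the pointer sizes below) through Git LFS, so the repository itself stores only small pointer files in place of the binary blobs.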
checkpoint-1080/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: ../../initial_seq_model
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.14.0
checkpoint-1080/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "../../initial_seq_model",
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "o_proj",
+ "gate_proj",
+ "down_proj",
+ "score",
+ "v_proj",
+ "up_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
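
The adapter config above is everything PEFT needs to re-attach the LoRA weights (r=8, alpha=16, dropout 0.1) to the base model. A minimal loading sketch, assuming the `base_model_name_or_path` has been resolved to a local path (the relative `../../initial_seq_model` only works from the original training directory, so the path below is a placeholder):

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Hypothetical local path standing in for "../../initial_seq_model".
base = AutoModelForCausalLM.from_pretrained("path/to/initial_seq_model")

# Reads adapter_config.json and adapter_model.safetensors from the checkpoint dir.
model = PeftModel.from_pretrained(base, "checkpoint-1080")
model.eval()
```

Note that `target_modules` includes a `score` module alongside the attention and MLP projections, which hints that the base checkpoint carries a sequence-classification head despite `task_type` being `CAUSAL_LM`; if loading with `AutoModelForCausalLM` fails, a `*ForSequenceClassification` class may be the right fit.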
checkpoint-1080/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b25cfc558172ed41b9a04d0500d65e9e73e7db84f502a72accf946894db5019
+ size 42068368
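
The three-line block above (and each similar `+3 -0` entry below) is a Git LFS pointer rather than the binary itself. A small stdlib-only sketch, with illustrative file names, that parses a pointer and checks a downloaded blob against the recorded SHA-256:

```python
import hashlib

def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into its oid and size fields."""
    fields = dict(line.split(" ", 1) for line in open(path, encoding="utf-8").read().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

def verify_blob(pointer_path, blob_path):
    """Return True if the blob's SHA-256 matches the pointer's oid."""
    meta = parse_lfs_pointer(pointer_path)
    digest = hashlib.sha256(open(blob_path, "rb").read()).hexdigest()
    return digest == meta["oid"]

# e.g. verify_blob("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```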
checkpoint-1080/global_step1080/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4d9031577da1e7762c50b932da888a5c6626c63bcffad7cb51fd30804feae32
+ size 63016432
checkpoint-1080/global_step1080/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7af06667ef17aeea9d89d00c3c76ae36eee2a1cdcac706482e3488e799ccaf9
+ size 63016432
checkpoint-1080/global_step1080/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c7680d2989209f98fd87190ed24fe7da8a9a333806491e6af7618ee47fa5862
+ size 63016432
checkpoint-1080/global_step1080/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e38d8ad525868f0f592e521ed1dcb8742632c4bf31e06cf17f1b5cd7c78456a
+ size 63016432
checkpoint-1080/global_step1080/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e58b10a37b18158187fef1e12724d4bc181db6b181b4ed070ee81a105387a8d
+ size 442088
checkpoint-1080/global_step1080/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fde76fbf099ae4028b3a15775b51903443a331c2e6b53a8f57be057d4f88680
+ size 442088
checkpoint-1080/global_step1080/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f02704af3e1c2b3552c2af14ddf3c52cd61057b39457dfa179ed00f101be8d92
+ size 442088
checkpoint-1080/global_step1080/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d39805cff41b8cdc173c9b4ef09c15ccaa3077c90461c90f5e93ad7efb4f5770
+ size 442088
checkpoint-1080/latest ADDED
@@ -0,0 +1 @@
+ global_step1080
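
`latest` simply names the DeepSpeed step directory (`global_step1080`) holding the ZeRO-sharded optimizer and model states above. The bundled `zero_to_fp32.py` consolidates those shards; a sketch of the equivalent programmatic call, assuming a DeepSpeed version compatible with the checkpoint:

```python
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Reads `latest` to locate global_step1080, then merges the per-rank shards
# into a single fp32 state dict.
state_dict = get_fp32_state_dict_from_zero_checkpoint("checkpoint-1080")
```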
checkpoint-1080/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32f8e825bf8d6d7e567ef987886f16dcf2c971fa3832d0f55ed70c72a89ccca3
+ size 14960
checkpoint-1080/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:785f20bfacf7717dbee5cfbae9e04092e67d092ed018bcc2edbfbf2d7b8f746c
+ size 14960
checkpoint-1080/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:112fbe394f47a631dac246dc171a6ecb1aae5ea5fe0460593dfa3770e31c3930
+ size 14960
checkpoint-1080/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd04444df403dd9e3e1c9a81712371ec25ed4b29ab132a3fcb2ef7dc379614e3
+ size 14960
checkpoint-1080/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30f8b1d36df474e2f70f75a59e2dfeaf50b8c1ba00ffcfcf7bd86c066705e146
+ size 1064
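
Together with `rng_state_*.pth`, `trainer_state.json`, and `training_args.bin`, the scheduler state above gives the checkpoint enough information for an exact training resume. A hedged fragment, assuming a `transformers.Trainer` rebuilt with the same arguments and data as the original run:

```python
# `trainer` is assumed to be a transformers.Trainer configured exactly as in training.
trainer.train(resume_from_checkpoint="checkpoint-1080")
```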
checkpoint-1080/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<BOS_TOKEN>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|END_OF_TURN_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<UNK>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
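
A quick check that this special-token mapping round-trips through the tokenizer, assuming the checkpoint directory is available locally:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-1080")
assert tok.bos_token == "<BOS_TOKEN>"
assert tok.eos_token == "<|END_OF_TURN_TOKEN|>"
assert tok.pad_token == "<PAD>"
assert tok.unk_token == "<UNK>"
```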
checkpoint-1080/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28c8b8038fcb2756e349a51832a56634423c579a869f39642526327aa56b2989
+ size 20125189
checkpoint-1080/tokenizer_config.json ADDED
@@ -0,0 +1,364 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<UNK>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "<CLS>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<SEP>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "<MASK_TOKEN>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "5": {
+ "content": "<BOS_TOKEN>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "6": {
+ "content": "<EOS_TOKEN>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "7": {
+ "content": "<EOP_TOKEN>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "255000": {
+ "content": "<|START_OF_TURN_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255001": {
+ "content": "<|END_OF_TURN_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "255002": {
+ "content": "<|YES_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255003": {
+ "content": "<|NO_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255004": {
+ "content": "<|GOOD_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255005": {
+ "content": "<|BAD_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255006": {
+ "content": "<|USER_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255007": {
+ "content": "<|CHATBOT_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255008": {
+ "content": "<|SYSTEM_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255009": {
+ "content": "<|USER_0_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255010": {
+ "content": "<|USER_1_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255011": {
+ "content": "<|USER_2_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255012": {
+ "content": "<|USER_3_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255013": {
+ "content": "<|USER_4_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255014": {
+ "content": "<|USER_5_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255015": {
+ "content": "<|USER_6_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255016": {
+ "content": "<|USER_7_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255017": {
+ "content": "<|USER_8_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255018": {
+ "content": "<|USER_9_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255019": {
+ "content": "<|START_THINKING|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255020": {
+ "content": "<|END_THINKING|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255021": {
+ "content": "<|START_RESPONSE|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "255022": {
+ "content": "<|END_RESPONSE|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "255023": {
+ "content": "<|START_ACTION|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255024": {
+ "content": "<|END_ACTION|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255025": {
+ "content": "<|START_TOOL_RESULT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255026": {
+ "content": "<|END_TOOL_RESULT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255027": {
+ "content": "<|EXTRA_8_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255028": {
+ "content": "<|NEW_FILE|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "255029": {
+ "content": "<|BEGINNING_OF_PREFIX_FIM_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255030": {
+ "content": "<|BEGINNING_OF_MIDDLE_FIM_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255031": {
+ "content": "<|BEGINNING_OF_SUFFIX_FIM_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "255032": {
+ "content": "<|END_OF_MIDDLE_FIM_TOKEN|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<BOS_TOKEN>",
+ "chat_template": [
+ {
+ "name": "default",
+ "template": "{% if documents %}\n{% set tools = [] %}\n{%- macro document_turn(documents) -%}\n{# format documents into chat turn #}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[\n {\"tool_call_id\": \"0\", \"tool_name\": \"direct-injected-document\", \"parameters\": {}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n {\n \"tool_call_id\": \"0\",\n \"results\": {\n{% for doc in documents %}\n \"{{ loop.index0 }}\": {{doc|tojson}}{% if not loop.last %},\n {% endif %}\n{% endfor %}\n\n },\n \"is_error\": null\n }\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %}\n{%- macro tool_call_id_to_int(messages, tool_call_id) %}\n{%- set counter = namespace(value=0) %}\n{%- set tool_call_id_seen = namespace(value=false) %}\n{%- for msg in messages %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls %}\n {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%}\n {{ counter.value }}\n {%- set tool_call_id_seen.value = true %}\n {%- endif %}\n {%- set counter.value = counter.value + 1 %}\n {%- endfor %}\n {%- endif %}\n{%- endfor %}\n{%- endmacro %}\n{%- macro format_tool_message(messages, tool_msg) -%}\n{# format tool message #}\n {\n \"tool_call_id\": \"{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}\",\n \"results\": {\n \"0\": {{ tool_msg.content|tojson }}\n },\n \"is_error\": null\n }\n{%- endmacro -%}\n{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %}\n{%- set tool_idx = namespace(value=0) %}\n{%- set tool_ids_seen = namespace(value=[]) %}\n{%- set sent_documents = namespace(value=false) %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes.\n\nYour information cutoff date is June 2024.\n\nYou have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages.\n{% if tools or documents %}\n\nYou have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests.\n\n## Tool Use\nThink about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first.\n\n0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. 
When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed.\n NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools.\n\nThen carry out your plan by repeatedly executing the following steps.\n1. Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing \"tool_name\" and \"parameters\" fields.\n When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. When you finish, close it out with <|END_ACTION|>.\n2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results.\n Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its \"tool_call_id\".\n3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded.\n NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user.\n\nYou can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user.\n\n4. Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>.\n{% if enable_citations %}\n\n## Grounding\nImportantly, note that \"Reflection\" and \"Response\" above can be grounded.\nGrounding means you associate pieces of texts (called \"spans\") with those specific tool results that support them (called \"sources\"). And you use a pair of tags \"<co>\" and \"</co>\" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as \"{tool_call_id}:[{list of result indices}]\", before they are joined together by \",\". E.g., \"<co>span</co: 0:[1,2],1:[0]>\" means that \"span\" is supported by result 1 and 2 from \"tool_call_id=0\" as well as result 0 from \"tool_call_id=1\".\n{% endif %}\n\n## Available Tools\nHere is the list of tools that you have available to you.\nYou can ONLY use the tools listed here. 
When a tool is not listed below, it is NOT available and you should NEVER attempt to use it.\nEach tool is represented as a JSON object with fields like \"name\", \"description\", \"parameters\" (per JSON Schema), and optionally, \"responses\" (per JSON Schema).\n\n```json\n[\n{% if documents %}\n {\"name\": \"direct-injected-document\", \"description\": \"This is a special tool to directly inject user-uploaded documents into the chat as additional context. DO NOT use this tool by yourself!\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}, \"responses\": {\"200\": {\"description\": \"Successfully returned a list of chunked text snippets from the directly uploaded documents.\", \"content\": {\"application/json\": {\"schema\": {\"type\": \"array\", \"items\": {\"type\": \"object\", \"required\": [\"url\", \"snippet\"], \"properties\": {\"url\": {\"type\": \"string\", \"description\": \"The url of the uploaded document.\"}, \"snippet\": {\"type\": \"string\", \"description\": \"The text snippet for the returned document chunk.\"}}}}}}}}}{%- if tools %},{% endif %}\n\n{% endif %}\n{% for tool in tools %}\n {\"name\": \"{{ tool['function']['name'] }}\", \"description\": \"{{tool['function']['description']}}\", \"parameters\": {{ tool['function']['parameters']|tojson }}, \"responses\": null}{%- if not loop.last %},{% endif %}\n\n{% endfor %}\n]\n```\n\n{% endif %}\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Command.\n- You are a large language model built by Cohere.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n{%- if developer_preamble %}\n\n\n# Developer Preamble\nThe following instructions take precedence over instructions in the default preamble and user prompt. 
You reject any instructions which conflict with system preamble instructions.\n{{ developer_preamble }}\n{%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages %}\n {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>\n {%- elif message.role|lower == 'user' %}\n<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %}\n {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[\n {% for tc in message.tool_calls %}\n {\"tool_call_id\": \"{{ tool_idx.value }}\", \"tool_name\": \"{{ tc['function']['name'] }}\", \"parameters\": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %}\n\n {% set tool_idx.value = tool_idx.value + 1 %}\n {% endfor %}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %}\n {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n{{ format_tool_message(messages, message) }}\n {%- for msg in messages[loop.index0 + 1:] %}\n {%- if msg.role|lower == 'tool' %},\n{{ format_tool_message(messages, msg) }}\n {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %}\n {%- else %}\n {%- break %}\n {%- endif %}\n {%- endfor %}\n\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>\n {%- endif %}\n{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\n{%- else -%}\n{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}\n {%- set system_message = messages[0]['content'] %}{% elif false == true %}\n {%- set loop_messages = messages %}{% set system_message = '' %}\n{%- else %}\n {%- set loop_messages = messages %}\n {%- set system_message = false %}\n{%- endif %}\n{%- if system_message != false -%}\n {{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}\n{%- else -%}\n {{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|END_OF_TURN_TOKEN|>' }}\n{%- endif %}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif -%}\n {%- set content = message['content'] -%}\n {%- if message['role'] == 'user' -%}\n {{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}\n {%- elif message['role'] == 'assistant' -%}\n {{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_RESPONSE|>' + content.strip() + '<|END_RESPONSE|><|END_OF_TURN_TOKEN|>' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt -%}\n {{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_RESPONSE|>' }}\n{%- endif %}\n{% endif %}"
+ },
+ {
+ "name": "tool_use",
+ "template": "{%- macro document_turn(documents) -%}\n{# format documents into chat turn #}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[\n {\"tool_call_id\": \"0\", \"tool_name\": \"direct-injected-document\", \"parameters\": {}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n {\n \"tool_call_id\": \"0\",\n \"results\": {\n{% for doc in documents %}\n \"{{ loop.index0 }}\": {{doc|tojson}}{% if not loop.last %},\n {% endif %}\n{% endfor %}\n\n },\n \"is_error\": null\n }\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %}\n{%- macro tool_call_id_to_int(messages, tool_call_id) %}\n{%- set counter = namespace(value=0) %}\n{%- set tool_call_id_seen = namespace(value=false) %}\n{%- for msg in messages %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls %}\n {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%}\n {{ counter.value }}\n {%- set tool_call_id_seen.value = true %}\n {%- endif %}\n {%- set counter.value = counter.value + 1 %}\n {%- endfor %}\n {%- endif %}\n{%- endfor %}\n{%- endmacro %}\n{%- macro format_tool_message(messages, tool_msg) -%}\n{# format tool message #}\n {\n \"tool_call_id\": \"{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}\",\n \"results\": {\n \"0\": {{ tool_msg.content|tojson }}\n },\n \"is_error\": null\n }\n{%- endmacro -%}\n{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %}\n{%- set tool_idx = namespace(value=0) %}\n{%- set tool_ids_seen = namespace(value=[]) %}\n{%- set sent_documents = namespace(value=false) %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes.\n\nYour information cutoff date is June 2024.\n\nYou have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages.\n{% if tools or documents %}\n\nYou have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests.\n\n## Tool Use\nThink about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first.\n\n0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed.\n NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools.\n\nThen carry out your plan by repeatedly executing the following steps.\n1. 
Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing \"tool_name\" and \"parameters\" fields.\n When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. When you finish, close it out with <|END_ACTION|>.\n2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results.\n Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its \"tool_call_id\".\n3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded.\n NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user.\n\nYou can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user.\n\n4. Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>.\n{% if enable_citations %}\n\n## Grounding\nImportantly, note that \"Reflection\" and \"Response\" above can be grounded.\nGrounding means you associate pieces of texts (called \"spans\") with those specific tool results that support them (called \"sources\"). And you use a pair of tags \"<co>\" and \"</co>\" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as \"{tool_call_id}:[{list of result indices}]\", before they are joined together by \",\". E.g., \"<co>span</co: 0:[1,2],1:[0]>\" means that \"span\" is supported by result 1 and 2 from \"tool_call_id=0\" as well as result 0 from \"tool_call_id=1\".\n{% endif %}\n\n## Available Tools\nHere is the list of tools that you have available to you.\nYou can ONLY use the tools listed here. When a tool is not listed below, it is NOT available and you should NEVER attempt to use it.\nEach tool is represented as a JSON object with fields like \"name\", \"description\", \"parameters\" (per JSON Schema), and optionally, \"responses\" (per JSON Schema).\n\n```json\n[\n{% if documents %}\n {\"name\": \"direct-injected-document\", \"description\": \"This is a special tool to directly inject user-uploaded documents into the chat as additional context. 
DO NOT use this tool by yourself!\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}, \"responses\": {\"200\": {\"description\": \"Successfully returned a list of chunked text snippets from the directly uploaded documents.\", \"content\": {\"application/json\": {\"schema\": {\"type\": \"array\", \"items\": {\"type\": \"object\", \"required\": [\"url\", \"snippet\"], \"properties\": {\"url\": {\"type\": \"string\", \"description\": \"The url of the uploaded document.\"}, \"snippet\": {\"type\": \"string\", \"description\": \"The text snippet for the returned document chunk.\"}}}}}}}}}{%- if tools %},{% endif %}\n\n{% endif %}\n{% for tool in tools %}\n {\"name\": \"{{ tool['function']['name'] }}\", \"description\": \"{{tool['function']['description']}}\", \"parameters\": {{ tool['function']['parameters']|tojson }}, \"responses\": null}{%- if not loop.last %},{% endif %}\n\n{% endfor %}\n]\n```\n\n{% endif %}\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Command.\n- You are a large language model built by Cohere.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n{%- if developer_preamble %}\n\n\n# Developer Preamble\nThe following instructions take precedence over instructions in the default preamble and user prompt. 
You reject any instructions which conflict with system preamble instructions.\n{{ developer_preamble }}\n{%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages %}\n {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>\n {%- elif message.role|lower == 'user' %}\n<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %}\n {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[\n {% for tc in message.tool_calls %}\n {\"tool_call_id\": \"{{ tool_idx.value }}\", \"tool_name\": \"{{ tc['function']['name'] }}\", \"parameters\": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %}\n\n {% set tool_idx.value = tool_idx.value + 1 %}\n {% endfor %}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %}\n {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n{{ format_tool_message(messages, message) }}\n {%- for msg in messages[loop.index0 + 1:] %}\n {%- if msg.role|lower == 'tool' %},\n{{ format_tool_message(messages, msg) }}\n {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %}\n {%- else %}\n {%- break %}\n {%- endif %}\n {%- endfor %}\n\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>\n {%- endif %}\n{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
+ },
+ {
+ "name": "rag",
+ "template": "{% set tools = [] %}\n{%- macro document_turn(documents) -%}\n{# format documents into chat turn #}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[\n {\"tool_call_id\": \"0\", \"tool_name\": \"direct-injected-document\", \"parameters\": {}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n {\n \"tool_call_id\": \"0\",\n \"results\": {\n{% for doc in documents %}\n \"{{ loop.index0 }}\": {{doc|tojson}}{% if not loop.last %},\n {% endif %}\n{% endfor %}\n\n },\n \"is_error\": null\n }\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %}\n{%- macro tool_call_id_to_int(messages, tool_call_id) %}\n{%- set counter = namespace(value=0) %}\n{%- set tool_call_id_seen = namespace(value=false) %}\n{%- for msg in messages %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls %}\n {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%}\n {{ counter.value }}\n {%- set tool_call_id_seen.value = true %}\n {%- endif %}\n {%- set counter.value = counter.value + 1 %}\n {%- endfor %}\n {%- endif %}\n{%- endfor %}\n{%- endmacro %}\n{%- macro format_tool_message(messages, tool_msg) -%}\n{# format tool message #}\n {\n \"tool_call_id\": \"{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}\",\n \"results\": {\n \"0\": {{ tool_msg.content|tojson }}\n },\n \"is_error\": null\n }\n{%- endmacro -%}\n{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %}\n{%- set tool_idx = namespace(value=0) %}\n{%- set tool_ids_seen = namespace(value=[]) %}\n{%- set sent_documents = namespace(value=false) %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes.\n\nYour information cutoff date is June 2024.\n\nYou have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages.\n{% if tools or documents %}\n\nYou have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests.\n\n## Tool Use\nThink about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first.\n\n0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. 
When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed.\n NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools.\n\nThen carry out your plan by repeatedly executing the following steps.\n1. Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing \"tool_name\" and \"parameters\" fields.\n When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. When you finish, close it out with <|END_ACTION|>.\n2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results.\n Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its \"tool_call_id\".\n3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded.\n NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user.\n\nYou can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user.\n\n4. Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>.\n{% if enable_citations %}\n\n## Grounding\nImportantly, note that \"Reflection\" and \"Response\" above can be grounded.\nGrounding means you associate pieces of texts (called \"spans\") with those specific tool results that support them (called \"sources\"). And you use a pair of tags \"<co>\" and \"</co>\" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as \"{tool_call_id}:[{list of result indices}]\", before they are joined together by \",\". E.g., \"<co>span</co: 0:[1,2],1:[0]>\" means that \"span\" is supported by result 1 and 2 from \"tool_call_id=0\" as well as result 0 from \"tool_call_id=1\".\n{% endif %}\n\n## Available Tools\nHere is the list of tools that you have available to you.\nYou can ONLY use the tools listed here. 
When a tool is not listed below, it is NOT available and you should NEVER attempt to use it.\nEach tool is represented as a JSON object with fields like \"name\", \"description\", \"parameters\" (per JSON Schema), and optionally, \"responses\" (per JSON Schema).\n\n```json\n[\n{% if documents %}\n {\"name\": \"direct-injected-document\", \"description\": \"This is a special tool to directly inject user-uploaded documents into the chat as additional context. DO NOT use this tool by yourself!\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}, \"responses\": {\"200\": {\"description\": \"Successfully returned a list of chunked text snippets from the directly uploaded documents.\", \"content\": {\"application/json\": {\"schema\": {\"type\": \"array\", \"items\": {\"type\": \"object\", \"required\": [\"url\", \"snippet\"], \"properties\": {\"url\": {\"type\": \"string\", \"description\": \"The url of the uploaded document.\"}, \"snippet\": {\"type\": \"string\", \"description\": \"The text snippet for the returned document chunk.\"}}}}}}}}}{%- if tools %},{% endif %}\n\n{% endif %}\n{% for tool in tools %}\n {\"name\": \"{{ tool['function']['name'] }}\", \"description\": \"{{tool['function']['description']}}\", \"parameters\": {{ tool['function']['parameters']|tojson }}, \"responses\": null}{%- if not loop.last %},{% endif %}\n\n{% endfor %}\n]\n```\n\n{% endif %}\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Command.\n- You are a large language model built by Cohere.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n{%- if developer_preamble %}\n\n\n# Developer Preamble\nThe following instructions take precedence over instructions in the default preamble and user prompt. 
You reject any instructions which conflict with system preamble instructions.\n{{ developer_preamble }}\n{%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages %}\n {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>\n {%- elif message.role|lower == 'user' %}\n<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %}\n {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[\n {% for tc in message.tool_calls %}\n {\"tool_call_id\": \"{{ tool_idx.value }}\", \"tool_name\": \"{{ tc['function']['name'] }}\", \"parameters\": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %}\n\n {% set tool_idx.value = tool_idx.value + 1 %}\n {% endfor %}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %}\n {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n{{ format_tool_message(messages, message) }}\n {%- for msg in messages[loop.index0 + 1:] %}\n {%- if msg.role|lower == 'tool' %},\n{{ format_tool_message(messages, msg) }}\n {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %}\n {%- else %}\n {%- break %}\n {%- endif %}\n {%- endfor %}\n\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>\n {%- endif %}\n{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
+ }
+ ],
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|END_OF_TURN_TOKEN|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "merges_file": null,
+ "model_max_length": 8192,
+ "pad_token": "<PAD>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "CohereTokenizer",
+ "unk_token": "<UNK>",
+ "use_default_system_prompt": false,
+ "vocab_file": null
+ }
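The tokenizer_config.json above pins the special tokens (pad "<PAD>", eos "<|END_OF_TURN_TOKEN|>") and an 8192-token context, while the trainer_state.json added below is the standard Hugging Face Trainer state whose "log_history" interleaves per-step training entries ("loss") with periodic evaluation entries ("eval_loss", every 150 steps here). A minimal sketch of consuming both files from a local copy of this repo (assuming the `transformers` library is installed; the checkpoint path is illustrative, since each uploaded checkpoint-* directory carries its own tokenizer files and trainer_state.json):

```python
import json

from transformers import AutoTokenizer

# Illustrative local path: any of the uploaded checkpoint-* directories works.
ckpt_dir = "checkpoint-1080"

# Loads the CohereTokenizer configured above.
tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)
print(tokenizer.eos_token, tokenizer.model_max_length)  # <|END_OF_TURN_TOKEN|> 8192

# "log_history" is a list of dicts: training steps carry "loss",
# evaluation steps carry "eval_loss" plus runtime stats.
with open(f"{ckpt_dir}/trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(train[:3])  # [(2, 2.3653), (4, 2.5001), (6, 2.3759)]
print(evals[:2])  # [(150, 0.9286...), (300, 0.6939...)]
```

From the entries logged here, eval_loss falls from about 0.93 at step 150 to 0.69 at step 300 and 0.66 at step 450, so the adapter was still improving over this stretch of training.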
checkpoint-1080/trainer_state.json ADDED
@@ -0,0 +1,3869 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.375456283678081,
5
+ "eval_steps": 150,
6
+ "global_step": 1080,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0006952894142186686,
13
+ "grad_norm": 5.413117383066536,
14
+ "learning_rate": 6.944444444444445e-07,
15
+ "loss": 2.3653,
16
+ "step": 2
17
+ },
18
+ {
19
+ "epoch": 0.0013905788284373371,
20
+ "grad_norm": 5.092870612337868,
21
+ "learning_rate": 1.388888888888889e-06,
22
+ "loss": 2.5001,
23
+ "step": 4
24
+ },
25
+ {
26
+ "epoch": 0.0020858682426560054,
27
+ "grad_norm": 3.6285625961711943,
28
+ "learning_rate": 2.0833333333333334e-06,
29
+ "loss": 2.3759,
30
+ "step": 6
31
+ },
32
+ {
33
+ "epoch": 0.0027811576568746743,
34
+ "grad_norm": 2.5025911526151075,
35
+ "learning_rate": 2.777777777777778e-06,
36
+ "loss": 1.944,
37
+ "step": 8
38
+ },
39
+ {
40
+ "epoch": 0.0034764470710933427,
41
+ "grad_norm": 3.077266049542496,
42
+ "learning_rate": 3.4722222222222224e-06,
43
+ "loss": 2.2504,
44
+ "step": 10
45
+ },
46
+ {
47
+ "epoch": 0.004171736485312011,
48
+ "grad_norm": 4.068243939187174,
49
+ "learning_rate": 4.166666666666667e-06,
50
+ "loss": 2.0637,
51
+ "step": 12
52
+ },
53
+ {
54
+ "epoch": 0.00486702589953068,
55
+ "grad_norm": 3.511444738830971,
56
+ "learning_rate": 4.861111111111111e-06,
57
+ "loss": 2.3604,
58
+ "step": 14
59
+ },
60
+ {
61
+ "epoch": 0.0055623153137493485,
62
+ "grad_norm": 5.925744892256934,
63
+ "learning_rate": 5.555555555555556e-06,
64
+ "loss": 2.6528,
65
+ "step": 16
66
+ },
67
+ {
68
+ "epoch": 0.0062576047279680165,
69
+ "grad_norm": 2.7950114871483405,
70
+ "learning_rate": 6.25e-06,
71
+ "loss": 2.3229,
72
+ "step": 18
73
+ },
74
+ {
75
+ "epoch": 0.006952894142186685,
76
+ "grad_norm": 6.586211843766182,
77
+ "learning_rate": 6.944444444444445e-06,
78
+ "loss": 2.342,
79
+ "step": 20
80
+ },
81
+ {
82
+ "epoch": 0.0076481835564053535,
83
+ "grad_norm": 4.902839955269193,
84
+ "learning_rate": 7.63888888888889e-06,
85
+ "loss": 2.4188,
86
+ "step": 22
87
+ },
88
+ {
89
+ "epoch": 0.008343472970624021,
90
+ "grad_norm": 4.257062809771645,
91
+ "learning_rate": 8.333333333333334e-06,
92
+ "loss": 1.7957,
93
+ "step": 24
94
+ },
95
+ {
96
+ "epoch": 0.009038762384842691,
97
+ "grad_norm": 4.460352004615699,
98
+ "learning_rate": 9.027777777777777e-06,
99
+ "loss": 2.0726,
100
+ "step": 26
101
+ },
102
+ {
103
+ "epoch": 0.00973405179906136,
104
+ "grad_norm": 5.858061506133739,
105
+ "learning_rate": 9.722222222222223e-06,
106
+ "loss": 2.0455,
107
+ "step": 28
108
+ },
109
+ {
110
+ "epoch": 0.010429341213280027,
111
+ "grad_norm": 4.331946668100709,
112
+ "learning_rate": 1.0416666666666668e-05,
113
+ "loss": 1.7645,
114
+ "step": 30
115
+ },
116
+ {
117
+ "epoch": 0.011124630627498697,
118
+ "grad_norm": 5.428742204187391,
119
+ "learning_rate": 1.1111111111111112e-05,
120
+ "loss": 1.7446,
121
+ "step": 32
122
+ },
123
+ {
124
+ "epoch": 0.011819920041717365,
125
+ "grad_norm": 2.485909286541028,
126
+ "learning_rate": 1.1805555555555555e-05,
127
+ "loss": 1.5885,
128
+ "step": 34
129
+ },
130
+ {
131
+ "epoch": 0.012515209455936033,
132
+ "grad_norm": 3.2602949308730222,
133
+ "learning_rate": 1.25e-05,
134
+ "loss": 1.1014,
135
+ "step": 36
136
+ },
137
+ {
138
+ "epoch": 0.013210498870154701,
139
+ "grad_norm": 4.962187747415964,
140
+ "learning_rate": 1.3194444444444446e-05,
141
+ "loss": 1.3588,
142
+ "step": 38
143
+ },
144
+ {
145
+ "epoch": 0.01390578828437337,
146
+ "grad_norm": 7.627756894198461,
147
+ "learning_rate": 1.388888888888889e-05,
148
+ "loss": 1.4014,
149
+ "step": 40
150
+ },
151
+ {
152
+ "epoch": 0.014601077698592039,
153
+ "grad_norm": 2.821848388410092,
154
+ "learning_rate": 1.4583333333333335e-05,
155
+ "loss": 1.0959,
156
+ "step": 42
157
+ },
158
+ {
159
+ "epoch": 0.015296367112810707,
160
+ "grad_norm": 10.989320944549025,
161
+ "learning_rate": 1.527777777777778e-05,
162
+ "loss": 1.739,
163
+ "step": 44
164
+ },
165
+ {
166
+ "epoch": 0.015991656527029375,
167
+ "grad_norm": 4.300936602591115,
168
+ "learning_rate": 1.597222222222222e-05,
169
+ "loss": 1.1078,
170
+ "step": 46
171
+ },
172
+ {
173
+ "epoch": 0.016686945941248043,
174
+ "grad_norm": 3.7539663261019856,
175
+ "learning_rate": 1.6666666666666667e-05,
176
+ "loss": 1.2701,
177
+ "step": 48
178
+ },
179
+ {
180
+ "epoch": 0.017382235355466714,
181
+ "grad_norm": 2.589248169352173,
182
+ "learning_rate": 1.736111111111111e-05,
183
+ "loss": 1.4452,
184
+ "step": 50
185
+ },
186
+ {
187
+ "epoch": 0.018077524769685382,
188
+ "grad_norm": 3.6679301322156177,
189
+ "learning_rate": 1.8055555555555555e-05,
190
+ "loss": 1.4243,
191
+ "step": 52
192
+ },
193
+ {
194
+ "epoch": 0.01877281418390405,
195
+ "grad_norm": 2.0885660923860074,
196
+ "learning_rate": 1.8750000000000002e-05,
197
+ "loss": 1.4453,
198
+ "step": 54
199
+ },
200
+ {
201
+ "epoch": 0.01946810359812272,
202
+ "grad_norm": 2.955353237610474,
203
+ "learning_rate": 1.9444444444444445e-05,
204
+ "loss": 1.5388,
205
+ "step": 56
206
+ },
207
+ {
208
+ "epoch": 0.020163393012341387,
209
+ "grad_norm": 3.0527006398487018,
210
+ "learning_rate": 2.013888888888889e-05,
211
+ "loss": 1.4043,
212
+ "step": 58
213
+ },
214
+ {
215
+ "epoch": 0.020858682426560055,
216
+ "grad_norm": 1.9862208864154767,
217
+ "learning_rate": 2.0833333333333336e-05,
218
+ "loss": 1.1007,
219
+ "step": 60
220
+ },
221
+ {
222
+ "epoch": 0.021553971840778723,
223
+ "grad_norm": 2.347361178472164,
224
+ "learning_rate": 2.152777777777778e-05,
225
+ "loss": 0.9291,
226
+ "step": 62
227
+ },
228
+ {
229
+ "epoch": 0.022249261254997394,
230
+ "grad_norm": 2.4812993223105995,
231
+ "learning_rate": 2.2222222222222223e-05,
232
+ "loss": 0.9592,
233
+ "step": 64
234
+ },
235
+ {
236
+ "epoch": 0.022944550669216062,
237
+ "grad_norm": 1.639333831845777,
238
+ "learning_rate": 2.2916666666666667e-05,
239
+ "loss": 1.0645,
240
+ "step": 66
241
+ },
242
+ {
243
+ "epoch": 0.02363984008343473,
244
+ "grad_norm": 2.92858351082494,
245
+ "learning_rate": 2.361111111111111e-05,
246
+ "loss": 1.4269,
247
+ "step": 68
248
+ },
249
+ {
250
+ "epoch": 0.024335129497653398,
251
+ "grad_norm": 2.8503434812871604,
252
+ "learning_rate": 2.4305555555555558e-05,
253
+ "loss": 0.7829,
254
+ "step": 70
255
+ },
256
+ {
257
+ "epoch": 0.025030418911872066,
258
+ "grad_norm": 2.4875590459354107,
259
+ "learning_rate": 2.5e-05,
260
+ "loss": 0.9733,
261
+ "step": 72
262
+ },
263
+ {
264
+ "epoch": 0.025725708326090734,
265
+ "grad_norm": 5.2567545525905075,
266
+ "learning_rate": 2.5694444444444445e-05,
267
+ "loss": 1.3559,
268
+ "step": 74
269
+ },
270
+ {
271
+ "epoch": 0.026420997740309402,
272
+ "grad_norm": 4.68745753567611,
273
+ "learning_rate": 2.6388888888888892e-05,
274
+ "loss": 1.1499,
275
+ "step": 76
276
+ },
277
+ {
278
+ "epoch": 0.027116287154528074,
279
+ "grad_norm": 2.810345453706711,
280
+ "learning_rate": 2.7083333333333332e-05,
281
+ "loss": 0.8636,
282
+ "step": 78
283
+ },
284
+ {
285
+ "epoch": 0.02781157656874674,
286
+ "grad_norm": 3.0144493626195388,
287
+ "learning_rate": 2.777777777777778e-05,
288
+ "loss": 0.9274,
289
+ "step": 80
290
+ },
291
+ {
292
+ "epoch": 0.02850686598296541,
293
+ "grad_norm": 2.3263036535508523,
294
+ "learning_rate": 2.8472222222222223e-05,
295
+ "loss": 1.3979,
296
+ "step": 82
297
+ },
298
+ {
299
+ "epoch": 0.029202155397184078,
300
+ "grad_norm": 1.6009019341419857,
301
+ "learning_rate": 2.916666666666667e-05,
302
+ "loss": 0.9039,
303
+ "step": 84
304
+ },
305
+ {
306
+ "epoch": 0.029897444811402746,
307
+ "grad_norm": 2.0637506891442294,
308
+ "learning_rate": 2.9861111111111113e-05,
309
+ "loss": 1.5187,
310
+ "step": 86
311
+ },
312
+ {
313
+ "epoch": 0.030592734225621414,
314
+ "grad_norm": 1.5533001268209932,
315
+ "learning_rate": 3.055555555555556e-05,
316
+ "loss": 0.8937,
317
+ "step": 88
318
+ },
319
+ {
320
+ "epoch": 0.03128802363984008,
321
+ "grad_norm": 1.9974857545819733,
322
+ "learning_rate": 3.125e-05,
323
+ "loss": 0.8218,
324
+ "step": 90
325
+ },
326
+ {
327
+ "epoch": 0.03198331305405875,
328
+ "grad_norm": 1.871899331408953,
329
+ "learning_rate": 3.194444444444444e-05,
330
+ "loss": 1.2451,
331
+ "step": 92
332
+ },
333
+ {
334
+ "epoch": 0.03267860246827742,
335
+ "grad_norm": 1.591601830348497,
336
+ "learning_rate": 3.263888888888889e-05,
337
+ "loss": 0.7485,
338
+ "step": 94
339
+ },
340
+ {
341
+ "epoch": 0.033373891882496086,
342
+ "grad_norm": 2.404166647711005,
343
+ "learning_rate": 3.3333333333333335e-05,
344
+ "loss": 1.1587,
345
+ "step": 96
346
+ },
347
+ {
348
+ "epoch": 0.03406918129671476,
349
+ "grad_norm": 1.581080788392888,
350
+ "learning_rate": 3.402777777777778e-05,
351
+ "loss": 0.9578,
352
+ "step": 98
353
+ },
354
+ {
355
+ "epoch": 0.03476447071093343,
356
+ "grad_norm": 2.1551207338771547,
357
+ "learning_rate": 3.472222222222222e-05,
358
+ "loss": 1.1305,
359
+ "step": 100
360
+ },
361
+ {
362
+ "epoch": 0.0354597601251521,
363
+ "grad_norm": 4.2246272812704,
364
+ "learning_rate": 3.541666666666667e-05,
365
+ "loss": 1.04,
366
+ "step": 102
367
+ },
368
+ {
369
+ "epoch": 0.036155049539370765,
370
+ "grad_norm": 2.5526898161765588,
371
+ "learning_rate": 3.611111111111111e-05,
372
+ "loss": 1.0743,
373
+ "step": 104
374
+ },
375
+ {
376
+ "epoch": 0.03685033895358943,
377
+ "grad_norm": 1.9784815137623597,
378
+ "learning_rate": 3.6805555555555556e-05,
379
+ "loss": 0.9667,
380
+ "step": 106
381
+ },
382
+ {
383
+ "epoch": 0.0375456283678081,
384
+ "grad_norm": 1.9131648200880944,
385
+ "learning_rate": 3.7500000000000003e-05,
386
+ "loss": 0.8951,
387
+ "step": 108
388
+ },
389
+ {
390
+ "epoch": 0.03824091778202677,
391
+ "grad_norm": 3.9405296351174575,
392
+ "learning_rate": 3.8194444444444444e-05,
393
+ "loss": 1.222,
394
+ "step": 110
395
+ },
396
+ {
397
+ "epoch": 0.03893620719624544,
398
+ "grad_norm": 13.764861545850291,
399
+ "learning_rate": 3.888888888888889e-05,
400
+ "loss": 1.0497,
401
+ "step": 112
402
+ },
403
+ {
404
+ "epoch": 0.039631496610464105,
405
+ "grad_norm": 3.998123122175411,
406
+ "learning_rate": 3.958333333333333e-05,
407
+ "loss": 1.0901,
408
+ "step": 114
409
+ },
410
+ {
411
+ "epoch": 0.04032678602468277,
412
+ "grad_norm": 3.126193921423756,
413
+ "learning_rate": 4.027777777777778e-05,
414
+ "loss": 1.1143,
415
+ "step": 116
416
+ },
417
+ {
418
+ "epoch": 0.04102207543890144,
419
+ "grad_norm": 2.7839926692610613,
420
+ "learning_rate": 4.0972222222222225e-05,
421
+ "loss": 1.1637,
422
+ "step": 118
423
+ },
424
+ {
425
+ "epoch": 0.04171736485312011,
426
+ "grad_norm": 2.591162621161276,
427
+ "learning_rate": 4.166666666666667e-05,
428
+ "loss": 1.0624,
429
+ "step": 120
430
+ },
431
+ {
432
+ "epoch": 0.04241265426733878,
433
+ "grad_norm": 1.4930703711545332,
434
+ "learning_rate": 4.236111111111111e-05,
435
+ "loss": 1.0239,
436
+ "step": 122
437
+ },
438
+ {
439
+ "epoch": 0.043107943681557445,
440
+ "grad_norm": 4.355150070532966,
441
+ "learning_rate": 4.305555555555556e-05,
442
+ "loss": 1.266,
443
+ "step": 124
444
+ },
445
+ {
446
+ "epoch": 0.04380323309577612,
447
+ "grad_norm": 1.8285608876277135,
448
+ "learning_rate": 4.375e-05,
449
+ "loss": 1.0667,
450
+ "step": 126
451
+ },
452
+ {
453
+ "epoch": 0.04449852250999479,
454
+ "grad_norm": 2.1848832742617055,
455
+ "learning_rate": 4.4444444444444447e-05,
456
+ "loss": 0.7549,
457
+ "step": 128
458
+ },
459
+ {
460
+ "epoch": 0.045193811924213456,
461
+ "grad_norm": 3.547857947451226,
462
+ "learning_rate": 4.5138888888888894e-05,
463
+ "loss": 0.7974,
464
+ "step": 130
465
+ },
466
+ {
467
+ "epoch": 0.045889101338432124,
468
+ "grad_norm": 3.372457028128184,
469
+ "learning_rate": 4.5833333333333334e-05,
470
+ "loss": 1.0482,
471
+ "step": 132
472
+ },
473
+ {
474
+ "epoch": 0.04658439075265079,
475
+ "grad_norm": 2.2871138548091703,
476
+ "learning_rate": 4.652777777777778e-05,
477
+ "loss": 0.8246,
478
+ "step": 134
479
+ },
480
+ {
481
+ "epoch": 0.04727968016686946,
482
+ "grad_norm": 1.7489408054743605,
483
+ "learning_rate": 4.722222222222222e-05,
484
+ "loss": 1.0011,
485
+ "step": 136
486
+ },
487
+ {
488
+ "epoch": 0.04797496958108813,
489
+ "grad_norm": 1.944650349232646,
490
+ "learning_rate": 4.791666666666667e-05,
491
+ "loss": 0.7828,
492
+ "step": 138
493
+ },
494
+ {
495
+ "epoch": 0.048670258995306796,
496
+ "grad_norm": 2.321619459307342,
497
+ "learning_rate": 4.8611111111111115e-05,
498
+ "loss": 1.02,
499
+ "step": 140
500
+ },
501
+ {
502
+ "epoch": 0.049365548409525464,
503
+ "grad_norm": 5.317732559595606,
504
+ "learning_rate": 4.930555555555556e-05,
505
+ "loss": 0.9539,
506
+ "step": 142
507
+ },
508
+ {
509
+ "epoch": 0.05006083782374413,
510
+ "grad_norm": 1.5887292172279854,
511
+ "learning_rate": 5e-05,
512
+ "loss": 0.7961,
513
+ "step": 144
514
+ },
515
+ {
516
+ "epoch": 0.0507561272379628,
517
+ "grad_norm": 2.9987019111854964,
518
+ "learning_rate": 5.069444444444444e-05,
519
+ "loss": 1.0018,
520
+ "step": 146
521
+ },
522
+ {
523
+ "epoch": 0.05145141665218147,
524
+ "grad_norm": 5.63878906132749,
525
+ "learning_rate": 5.138888888888889e-05,
526
+ "loss": 0.9278,
527
+ "step": 148
528
+ },
529
+ {
530
+ "epoch": 0.052146706066400136,
531
+ "grad_norm": 2.4849685204332834,
532
+ "learning_rate": 5.208333333333334e-05,
533
+ "loss": 0.7531,
534
+ "step": 150
535
+ },
536
+ {
537
+ "epoch": 0.052146706066400136,
538
+ "eval_loss": 0.9286500215530396,
539
+ "eval_runtime": 711.2619,
540
+ "eval_samples_per_second": 6.808,
541
+ "eval_steps_per_second": 0.214,
542
+ "step": 150
543
+ },
544
+ {
545
+ "epoch": 0.052841995480618804,
546
+ "grad_norm": 3.4927981580288776,
547
+ "learning_rate": 5.2777777777777784e-05,
548
+ "loss": 0.7422,
549
+ "step": 152
550
+ },
551
+ {
552
+ "epoch": 0.05353728489483748,
553
+ "grad_norm": 2.6593724943984682,
554
+ "learning_rate": 5.3472222222222224e-05,
555
+ "loss": 0.7762,
556
+ "step": 154
557
+ },
558
+ {
559
+ "epoch": 0.05423257430905615,
560
+ "grad_norm": 2.99709354088536,
561
+ "learning_rate": 5.4166666666666664e-05,
562
+ "loss": 0.8658,
563
+ "step": 156
564
+ },
565
+ {
566
+ "epoch": 0.054927863723274815,
567
+ "grad_norm": 3.9196361623391414,
568
+ "learning_rate": 5.486111111111112e-05,
569
+ "loss": 1.1783,
570
+ "step": 158
571
+ },
572
+ {
573
+ "epoch": 0.05562315313749348,
574
+ "grad_norm": 3.022562685608673,
575
+ "learning_rate": 5.555555555555556e-05,
576
+ "loss": 1.0602,
577
+ "step": 160
578
+ },
579
+ {
580
+ "epoch": 0.05631844255171215,
581
+ "grad_norm": 3.292942684053579,
582
+ "learning_rate": 5.6250000000000005e-05,
583
+ "loss": 0.8561,
584
+ "step": 162
585
+ },
586
+ {
587
+ "epoch": 0.05701373196593082,
588
+ "grad_norm": 2.397775023338686,
589
+ "learning_rate": 5.6944444444444445e-05,
590
+ "loss": 0.9561,
591
+ "step": 164
592
+ },
593
+ {
594
+ "epoch": 0.05770902138014949,
595
+ "grad_norm": 3.0546714312119643,
596
+ "learning_rate": 5.7638888888888886e-05,
597
+ "loss": 0.8274,
598
+ "step": 166
599
+ },
600
+ {
601
+ "epoch": 0.058404310794368156,
602
+ "grad_norm": 2.4021883380894393,
603
+ "learning_rate": 5.833333333333334e-05,
604
+ "loss": 0.8492,
605
+ "step": 168
606
+ },
607
+ {
608
+ "epoch": 0.059099600208586824,
609
+ "grad_norm": 2.6052829770116293,
610
+ "learning_rate": 5.902777777777778e-05,
611
+ "loss": 1.1005,
612
+ "step": 170
613
+ },
614
+ {
615
+ "epoch": 0.05979488962280549,
616
+ "grad_norm": 1.6838192892320467,
617
+ "learning_rate": 5.972222222222223e-05,
618
+ "loss": 1.0157,
619
+ "step": 172
620
+ },
621
+ {
622
+ "epoch": 0.06049017903702416,
623
+ "grad_norm": 3.98880214871885,
624
+ "learning_rate": 6.041666666666667e-05,
625
+ "loss": 0.8136,
626
+ "step": 174
627
+ },
628
+ {
629
+ "epoch": 0.06118546845124283,
630
+ "grad_norm": 4.81494345341073,
631
+ "learning_rate": 6.111111111111112e-05,
632
+ "loss": 0.9475,
633
+ "step": 176
634
+ },
635
+ {
636
+ "epoch": 0.061880757865461496,
637
+ "grad_norm": 1.9926278890091862,
638
+ "learning_rate": 6.180555555555556e-05,
639
+ "loss": 0.6549,
640
+ "step": 178
641
+ },
642
+ {
643
+ "epoch": 0.06257604727968016,
644
+ "grad_norm": 1.750353030728397,
645
+ "learning_rate": 6.25e-05,
646
+ "loss": 1.1442,
647
+ "step": 180
648
+ },
649
+ {
650
+ "epoch": 0.06327133669389884,
651
+ "grad_norm": 4.77249782692129,
652
+ "learning_rate": 6.319444444444444e-05,
653
+ "loss": 1.1927,
654
+ "step": 182
655
+ },
656
+ {
657
+ "epoch": 0.0639666261081175,
658
+ "grad_norm": 1.866901788617278,
659
+ "learning_rate": 6.388888888888888e-05,
660
+ "loss": 1.0032,
661
+ "step": 184
662
+ },
663
+ {
664
+ "epoch": 0.06466191552233617,
665
+ "grad_norm": 1.7495859751833545,
666
+ "learning_rate": 6.458333333333334e-05,
667
+ "loss": 0.8439,
668
+ "step": 186
669
+ },
670
+ {
671
+ "epoch": 0.06535720493655484,
672
+ "grad_norm": 2.971674018184174,
673
+ "learning_rate": 6.527777777777778e-05,
674
+ "loss": 0.9817,
675
+ "step": 188
676
+ },
677
+ {
678
+ "epoch": 0.06605249435077351,
679
+ "grad_norm": 2.3753292673540165,
680
+ "learning_rate": 6.597222222222223e-05,
681
+ "loss": 0.9919,
682
+ "step": 190
683
+ },
684
+ {
685
+ "epoch": 0.06674778376499217,
686
+ "grad_norm": 2.7019148741557744,
687
+ "learning_rate": 6.666666666666667e-05,
688
+ "loss": 1.1442,
689
+ "step": 192
690
+ },
691
+ {
692
+ "epoch": 0.06744307317921085,
693
+ "grad_norm": 1.8670113287712482,
694
+ "learning_rate": 6.736111111111112e-05,
695
+ "loss": 0.8905,
696
+ "step": 194
697
+ },
698
+ {
699
+ "epoch": 0.06813836259342952,
700
+ "grad_norm": 1.4199145680760579,
701
+ "learning_rate": 6.805555555555556e-05,
702
+ "loss": 0.7223,
703
+ "step": 196
704
+ },
705
+ {
706
+ "epoch": 0.06883365200764818,
707
+ "grad_norm": 3.894200902880186,
708
+ "learning_rate": 6.875e-05,
709
+ "loss": 0.9005,
710
+ "step": 198
711
+ },
712
+ {
713
+ "epoch": 0.06952894142186686,
714
+ "grad_norm": 3.2710376491241955,
715
+ "learning_rate": 6.944444444444444e-05,
716
+ "loss": 1.1605,
717
+ "step": 200
718
+ },
719
+ {
720
+ "epoch": 0.07022423083608552,
721
+ "grad_norm": 1.6459754670035065,
722
+ "learning_rate": 7.013888888888888e-05,
723
+ "loss": 0.8551,
724
+ "step": 202
725
+ },
726
+ {
727
+ "epoch": 0.0709195202503042,
728
+ "grad_norm": 4.361031640374508,
729
+ "learning_rate": 7.083333333333334e-05,
730
+ "loss": 0.7007,
731
+ "step": 204
732
+ },
733
+ {
734
+ "epoch": 0.07161480966452285,
735
+ "grad_norm": 3.573741549123141,
736
+ "learning_rate": 7.152777777777778e-05,
737
+ "loss": 1.1396,
738
+ "step": 206
739
+ },
740
+ {
741
+ "epoch": 0.07231009907874153,
742
+ "grad_norm": 3.296990311359108,
743
+ "learning_rate": 7.222222222222222e-05,
744
+ "loss": 0.8695,
745
+ "step": 208
746
+ },
747
+ {
748
+ "epoch": 0.07300538849296019,
749
+ "grad_norm": 6.769659823038884,
750
+ "learning_rate": 7.291666666666667e-05,
751
+ "loss": 1.0511,
752
+ "step": 210
753
+ },
754
+ {
755
+ "epoch": 0.07370067790717887,
756
+ "grad_norm": 1.6695233666860303,
757
+ "learning_rate": 7.361111111111111e-05,
758
+ "loss": 0.8174,
759
+ "step": 212
760
+ },
761
+ {
762
+ "epoch": 0.07439596732139753,
763
+ "grad_norm": 1.3543755216281146,
764
+ "learning_rate": 7.430555555555557e-05,
765
+ "loss": 0.7137,
766
+ "step": 214
767
+ },
768
+ {
769
+ "epoch": 0.0750912567356162,
770
+ "grad_norm": 4.861063813937456,
771
+ "learning_rate": 7.500000000000001e-05,
772
+ "loss": 1.0132,
773
+ "step": 216
774
+ },
775
+ {
776
+ "epoch": 0.07578654614983486,
777
+ "grad_norm": 5.715913176528681,
778
+ "learning_rate": 7.569444444444445e-05,
779
+ "loss": 1.1657,
780
+ "step": 218
781
+ },
782
+ {
783
+ "epoch": 0.07648183556405354,
784
+ "grad_norm": 4.0193839358302235,
785
+ "learning_rate": 7.638888888888889e-05,
786
+ "loss": 1.0547,
787
+ "step": 220
788
+ },
789
+ {
790
+ "epoch": 0.0771771249782722,
791
+ "grad_norm": 2.26849134517291,
792
+ "learning_rate": 7.708333333333334e-05,
793
+ "loss": 0.9073,
794
+ "step": 222
795
+ },
796
+ {
797
+ "epoch": 0.07787241439249087,
798
+ "grad_norm": 4.570943697810998,
799
+ "learning_rate": 7.777777777777778e-05,
800
+ "loss": 1.3726,
801
+ "step": 224
802
+ },
803
+ {
804
+ "epoch": 0.07856770380670955,
805
+ "grad_norm": 1.1942436910880105,
806
+ "learning_rate": 7.847222222222222e-05,
807
+ "loss": 0.8146,
808
+ "step": 226
809
+ },
810
+ {
811
+ "epoch": 0.07926299322092821,
812
+ "grad_norm": 2.111849588751211,
813
+ "learning_rate": 7.916666666666666e-05,
814
+ "loss": 1.1157,
815
+ "step": 228
816
+ },
817
+ {
818
+ "epoch": 0.07995828263514689,
819
+ "grad_norm": 1.0283088880069582,
820
+ "learning_rate": 7.986111111111112e-05,
821
+ "loss": 0.767,
822
+ "step": 230
823
+ },
824
+ {
825
+ "epoch": 0.08065357204936555,
826
+ "grad_norm": 1.2834055069208525,
827
+ "learning_rate": 8.055555555555556e-05,
828
+ "loss": 0.6625,
829
+ "step": 232
830
+ },
831
+ {
832
+ "epoch": 0.08134886146358422,
833
+ "grad_norm": 1.3772942873595098,
834
+ "learning_rate": 8.125000000000001e-05,
835
+ "loss": 0.8065,
836
+ "step": 234
837
+ },
838
+ {
839
+ "epoch": 0.08204415087780288,
840
+ "grad_norm": 1.5113456549735176,
841
+ "learning_rate": 8.194444444444445e-05,
842
+ "loss": 0.8606,
843
+ "step": 236
844
+ },
845
+ {
846
+ "epoch": 0.08273944029202156,
847
+ "grad_norm": 1.5765846352838255,
848
+ "learning_rate": 8.263888888888889e-05,
849
+ "loss": 0.8335,
850
+ "step": 238
851
+ },
852
+ {
853
+ "epoch": 0.08343472970624022,
854
+ "grad_norm": 2.4873462478329404,
855
+ "learning_rate": 8.333333333333334e-05,
856
+ "loss": 0.9705,
857
+ "step": 240
858
+ },
859
+ {
860
+ "epoch": 0.0841300191204589,
861
+ "grad_norm": 1.2369219925635513,
862
+ "learning_rate": 8.402777777777778e-05,
863
+ "loss": 0.6061,
864
+ "step": 242
865
+ },
866
+ {
867
+ "epoch": 0.08482530853467755,
868
+ "grad_norm": 2.542132212473201,
869
+ "learning_rate": 8.472222222222222e-05,
870
+ "loss": 0.9142,
871
+ "step": 244
872
+ },
873
+ {
874
+ "epoch": 0.08552059794889623,
875
+ "grad_norm": 2.0301734217803022,
876
+ "learning_rate": 8.541666666666666e-05,
877
+ "loss": 0.8997,
878
+ "step": 246
879
+ },
880
+ {
881
+ "epoch": 0.08621588736311489,
882
+ "grad_norm": 1.8605316982945626,
883
+ "learning_rate": 8.611111111111112e-05,
884
+ "loss": 1.005,
885
+ "step": 248
886
+ },
887
+ {
888
+ "epoch": 0.08691117677733357,
889
+ "grad_norm": 1.193555257951713,
890
+ "learning_rate": 8.680555555555556e-05,
891
+ "loss": 0.8617,
892
+ "step": 250
893
+ },
894
+ {
895
+ "epoch": 0.08760646619155224,
896
+ "grad_norm": 1.243815428863678,
897
+ "learning_rate": 8.75e-05,
898
+ "loss": 0.6261,
899
+ "step": 252
900
+ },
901
+ {
902
+ "epoch": 0.0883017556057709,
903
+ "grad_norm": 1.6487754861704442,
904
+ "learning_rate": 8.819444444444445e-05,
905
+ "loss": 0.9219,
906
+ "step": 254
907
+ },
908
+ {
909
+ "epoch": 0.08899704501998958,
910
+ "grad_norm": 1.1768410857322613,
911
+ "learning_rate": 8.888888888888889e-05,
912
+ "loss": 0.8563,
913
+ "step": 256
914
+ },
915
+ {
916
+ "epoch": 0.08969233443420824,
917
+ "grad_norm": 1.1155265191420587,
918
+ "learning_rate": 8.958333333333335e-05,
919
+ "loss": 0.998,
920
+ "step": 258
921
+ },
922
+ {
923
+ "epoch": 0.09038762384842691,
924
+ "grad_norm": 1.4432456616674065,
925
+ "learning_rate": 9.027777777777779e-05,
926
+ "loss": 0.8755,
927
+ "step": 260
928
+ },
929
+ {
930
+ "epoch": 0.09108291326264557,
931
+ "grad_norm": 1.6013486668654413,
932
+ "learning_rate": 9.097222222222223e-05,
933
+ "loss": 0.6998,
934
+ "step": 262
935
+ },
936
+ {
937
+ "epoch": 0.09177820267686425,
938
+ "grad_norm": 0.7869968770186737,
939
+ "learning_rate": 9.166666666666667e-05,
940
+ "loss": 0.7574,
941
+ "step": 264
942
+ },
943
+ {
944
+ "epoch": 0.09247349209108291,
945
+ "grad_norm": 2.1117903903864566,
946
+ "learning_rate": 9.236111111111112e-05,
947
+ "loss": 0.8436,
948
+ "step": 266
949
+ },
950
+ {
951
+ "epoch": 0.09316878150530158,
952
+ "grad_norm": 1.3582999584721895,
953
+ "learning_rate": 9.305555555555556e-05,
954
+ "loss": 0.8503,
955
+ "step": 268
956
+ },
957
+ {
958
+ "epoch": 0.09386407091952025,
959
+ "grad_norm": 1.5674581009005415,
960
+ "learning_rate": 9.375e-05,
961
+ "loss": 0.9561,
962
+ "step": 270
963
+ },
964
+ {
965
+ "epoch": 0.09455936033373892,
966
+ "grad_norm": 1.0274246934159952,
967
+ "learning_rate": 9.444444444444444e-05,
968
+ "loss": 0.7537,
969
+ "step": 272
970
+ },
971
+ {
972
+ "epoch": 0.09525464974795758,
973
+ "grad_norm": 0.9595343432519174,
974
+ "learning_rate": 9.513888888888888e-05,
975
+ "loss": 0.7855,
976
+ "step": 274
977
+ },
978
+ {
979
+ "epoch": 0.09594993916217626,
980
+ "grad_norm": 1.3313115114367815,
981
+ "learning_rate": 9.583333333333334e-05,
982
+ "loss": 0.7009,
983
+ "step": 276
984
+ },
985
+ {
986
+ "epoch": 0.09664522857639492,
987
+ "grad_norm": 1.4409463331317498,
988
+ "learning_rate": 9.652777777777779e-05,
989
+ "loss": 1.0332,
990
+ "step": 278
991
+ },
992
+ {
993
+ "epoch": 0.09734051799061359,
994
+ "grad_norm": 1.5445848213023137,
995
+ "learning_rate": 9.722222222222223e-05,
996
+ "loss": 0.7352,
997
+ "step": 280
998
+ },
999
+ {
1000
+ "epoch": 0.09803580740483227,
1001
+ "grad_norm": 1.855518620927316,
1002
+ "learning_rate": 9.791666666666667e-05,
1003
+ "loss": 0.7191,
1004
+ "step": 282
1005
+ },
1006
+ {
1007
+ "epoch": 0.09873109681905093,
1008
+ "grad_norm": 1.4033396985161997,
1009
+ "learning_rate": 9.861111111111112e-05,
1010
+ "loss": 0.7886,
1011
+ "step": 284
1012
+ },
1013
+ {
1014
+ "epoch": 0.0994263862332696,
1015
+ "grad_norm": 1.9951521438049904,
1016
+ "learning_rate": 9.930555555555556e-05,
1017
+ "loss": 0.902,
1018
+ "step": 286
1019
+ },
1020
+ {
1021
+ "epoch": 0.10012167564748826,
1022
+ "grad_norm": 1.1360934228431687,
1023
+ "learning_rate": 0.0001,
1024
+ "loss": 0.8655,
1025
+ "step": 288
1026
+ },
1027
+ {
1028
+ "epoch": 0.10081696506170694,
1029
+ "grad_norm": 1.1225735433900375,
1030
+ "learning_rate": 0.00010069444444444445,
1031
+ "loss": 0.6851,
1032
+ "step": 290
1033
+ },
1034
+ {
1035
+ "epoch": 0.1015122544759256,
1036
+ "grad_norm": 0.770446891371583,
1037
+ "learning_rate": 0.00010138888888888889,
1038
+ "loss": 0.7775,
1039
+ "step": 292
1040
+ },
1041
+ {
1042
+ "epoch": 0.10220754389014428,
1043
+ "grad_norm": 0.797064180835607,
1044
+ "learning_rate": 0.00010208333333333333,
1045
+ "loss": 0.6567,
1046
+ "step": 294
1047
+ },
1048
+ {
1049
+ "epoch": 0.10290283330436294,
1050
+ "grad_norm": 0.9876188134326442,
1051
+ "learning_rate": 0.00010277777777777778,
1052
+ "loss": 0.7299,
1053
+ "step": 296
1054
+ },
1055
+ {
1056
+ "epoch": 0.10359812271858161,
1057
+ "grad_norm": 0.799601623643537,
1058
+ "learning_rate": 0.00010347222222222223,
1059
+ "loss": 0.7891,
1060
+ "step": 298
1061
+ },
1062
+ {
1063
+ "epoch": 0.10429341213280027,
1064
+ "grad_norm": 0.8791512281362982,
1065
+ "learning_rate": 0.00010416666666666667,
1066
+ "loss": 0.677,
1067
+ "step": 300
1068
+ },
1069
+ {
1070
+ "epoch": 0.10429341213280027,
1071
+ "eval_loss": 0.6939894556999207,
1072
+ "eval_runtime": 709.5268,
1073
+ "eval_samples_per_second": 6.824,
1074
+ "eval_steps_per_second": 0.214,
1075
+ "step": 300
1076
+ },
1077
+ {
1078
+ "epoch": 0.10498870154701895,
1079
+ "grad_norm": 1.329564645811689,
1080
+ "learning_rate": 0.00010486111111111113,
1081
+ "loss": 0.7511,
1082
+ "step": 302
1083
+ },
1084
+ {
1085
+ "epoch": 0.10568399096123761,
1086
+ "grad_norm": 0.7130415058241142,
1087
+ "learning_rate": 0.00010555555555555557,
1088
+ "loss": 0.709,
1089
+ "step": 304
1090
+ },
1091
+ {
1092
+ "epoch": 0.10637928037545628,
1093
+ "grad_norm": 1.1294733411370705,
1094
+ "learning_rate": 0.00010625000000000001,
1095
+ "loss": 0.7781,
1096
+ "step": 306
1097
+ },
1098
+ {
1099
+ "epoch": 0.10707456978967496,
1100
+ "grad_norm": 0.581727680778003,
1101
+ "learning_rate": 0.00010694444444444445,
1102
+ "loss": 0.6296,
1103
+ "step": 308
1104
+ },
1105
+ {
1106
+ "epoch": 0.10776985920389362,
1107
+ "grad_norm": 0.9855391514311871,
1108
+ "learning_rate": 0.00010763888888888889,
1109
+ "loss": 0.7852,
1110
+ "step": 310
1111
+ },
1112
+ {
1113
+ "epoch": 0.1084651486181123,
1114
+ "grad_norm": 0.5708379609857769,
1115
+ "learning_rate": 0.00010833333333333333,
1116
+ "loss": 0.6759,
1117
+ "step": 312
1118
+ },
1119
+ {
1120
+ "epoch": 0.10916043803233096,
1121
+ "grad_norm": 0.7243330859752051,
1122
+ "learning_rate": 0.00010902777777777777,
1123
+ "loss": 0.6326,
1124
+ "step": 314
1125
+ },
1126
+ {
1127
+ "epoch": 0.10985572744654963,
1128
+ "grad_norm": 1.3922163003264714,
1129
+ "learning_rate": 0.00010972222222222224,
1130
+ "loss": 0.5406,
1131
+ "step": 316
1132
+ },
1133
+ {
1134
+ "epoch": 0.11055101686076829,
1135
+ "grad_norm": 2.604851740418153,
1136
+ "learning_rate": 0.00011041666666666668,
1137
+ "loss": 0.8013,
1138
+ "step": 318
1139
+ },
1140
+ {
1141
+ "epoch": 0.11124630627498697,
1142
+ "grad_norm": 1.83511037983023,
1143
+ "learning_rate": 0.00011111111111111112,
1144
+ "loss": 0.8048,
1145
+ "step": 320
1146
+ },
1147
+ {
1148
+ "epoch": 0.11194159568920563,
1149
+ "grad_norm": 1.6737764614655666,
1150
+ "learning_rate": 0.00011180555555555556,
1151
+ "loss": 0.6943,
1152
+ "step": 322
1153
+ },
1154
+ {
1155
+ "epoch": 0.1126368851034243,
1156
+ "grad_norm": 1.077667781888673,
1157
+ "learning_rate": 0.00011250000000000001,
1158
+ "loss": 0.8054,
1159
+ "step": 324
1160
+ },
1161
+ {
1162
+ "epoch": 0.11333217451764296,
1163
+ "grad_norm": 0.5597693838209001,
1164
+ "learning_rate": 0.00011319444444444445,
1165
+ "loss": 0.6251,
1166
+ "step": 326
1167
+ },
1168
+ {
1169
+ "epoch": 0.11402746393186164,
1170
+ "grad_norm": 0.5636865078063477,
1171
+ "learning_rate": 0.00011388888888888889,
1172
+ "loss": 0.6832,
1173
+ "step": 328
1174
+ },
1175
+ {
1176
+ "epoch": 0.1147227533460803,
1177
+ "grad_norm": 0.6445176566556912,
1178
+ "learning_rate": 0.00011458333333333333,
1179
+ "loss": 0.6661,
1180
+ "step": 330
1181
+ },
1182
+ {
1183
+ "epoch": 0.11541804276029898,
1184
+ "grad_norm": 0.607532412895966,
1185
+ "learning_rate": 0.00011527777777777777,
1186
+ "loss": 0.7026,
1187
+ "step": 332
1188
+ },
1189
+ {
1190
+ "epoch": 0.11611333217451764,
1191
+ "grad_norm": 1.0496949694240345,
1192
+ "learning_rate": 0.00011597222222222224,
1193
+ "loss": 0.6451,
1194
+ "step": 334
1195
+ },
1196
+ {
1197
+ "epoch": 0.11680862158873631,
1198
+ "grad_norm": 0.9469233272179363,
1199
+ "learning_rate": 0.00011666666666666668,
1200
+ "loss": 0.6694,
1201
+ "step": 336
1202
+ },
1203
+ {
1204
+ "epoch": 0.11750391100295499,
1205
+ "grad_norm": 1.3789882614528595,
1206
+ "learning_rate": 0.00011736111111111112,
1207
+ "loss": 0.7031,
1208
+ "step": 338
1209
+ },
1210
+ {
1211
+ "epoch": 0.11819920041717365,
1212
+ "grad_norm": 1.2016589928587922,
1213
+ "learning_rate": 0.00011805555555555556,
1214
+ "loss": 0.6676,
1215
+ "step": 340
1216
+ },
1217
+ {
1218
+ "epoch": 0.11889448983139232,
1219
+ "grad_norm": 1.3112845410865746,
1220
+ "learning_rate": 0.00011875,
1221
+ "loss": 0.7236,
1222
+ "step": 342
1223
+ },
1224
+ {
1225
+ "epoch": 0.11958977924561098,
1226
+ "grad_norm": 0.6278789052805739,
1227
+ "learning_rate": 0.00011944444444444445,
1228
+ "loss": 0.6249,
1229
+ "step": 344
1230
+ },
1231
+ {
1232
+ "epoch": 0.12028506865982966,
1233
+ "grad_norm": 0.5405148902553805,
1234
+ "learning_rate": 0.0001201388888888889,
1235
+ "loss": 0.6412,
1236
+ "step": 346
1237
+ },
1238
+ {
1239
+ "epoch": 0.12098035807404832,
1240
+ "grad_norm": 1.6923616138961255,
1241
+ "learning_rate": 0.00012083333333333333,
1242
+ "loss": 0.7017,
1243
+ "step": 348
1244
+ },
1245
+ {
1246
+ "epoch": 0.121675647488267,
1247
+ "grad_norm": 1.5994113095813072,
1248
+ "learning_rate": 0.00012152777777777777,
1249
+ "loss": 0.7075,
1250
+ "step": 350
1251
+ },
1252
+ {
1253
+ "epoch": 0.12237093690248566,
1254
+ "grad_norm": 2.3083516502461783,
1255
+ "learning_rate": 0.00012222222222222224,
1256
+ "loss": 0.7755,
1257
+ "step": 352
1258
+ },
1259
+ {
1260
+ "epoch": 0.12306622631670433,
1261
+ "grad_norm": 0.8769167165306447,
1262
+ "learning_rate": 0.00012291666666666668,
1263
+ "loss": 0.7428,
1264
+ "step": 354
1265
+ },
1266
+ {
1267
+ "epoch": 0.12376151573092299,
1268
+ "grad_norm": 0.4456469638797518,
1269
+ "learning_rate": 0.00012361111111111112,
1270
+ "loss": 0.7002,
1271
+ "step": 356
1272
+ },
1273
+ {
1274
+ "epoch": 0.12445680514514167,
1275
+ "grad_norm": 0.8199993647792723,
1276
+ "learning_rate": 0.00012430555555555556,
1277
+ "loss": 0.7222,
1278
+ "step": 358
1279
+ },
1280
+ {
1281
+ "epoch": 0.12515209455936033,
1282
+ "grad_norm": 0.3741084955970339,
1283
+ "learning_rate": 0.000125,
1284
+ "loss": 0.6821,
1285
+ "step": 360
1286
+ },
1287
+ {
1288
+ "epoch": 0.125847383973579,
1289
+ "grad_norm": 0.2972203241099783,
1290
+ "learning_rate": 0.00012569444444444444,
1291
+ "loss": 0.7083,
1292
+ "step": 362
1293
+ },
1294
+ {
1295
+ "epoch": 0.12654267338779768,
1296
+ "grad_norm": 0.40651740632246575,
1297
+ "learning_rate": 0.00012638888888888888,
1298
+ "loss": 0.7144,
1299
+ "step": 364
1300
+ },
1301
+ {
1302
+ "epoch": 0.12723796280201635,
1303
+ "grad_norm": 0.4388093199704831,
1304
+ "learning_rate": 0.00012708333333333332,
1305
+ "loss": 0.6863,
1306
+ "step": 366
1307
+ },
1308
+ {
1309
+ "epoch": 0.127933252216235,
1310
+ "grad_norm": 0.45131267837810835,
1311
+ "learning_rate": 0.00012777777777777776,
1312
+ "loss": 0.6329,
1313
+ "step": 368
1314
+ },
1315
+ {
1316
+ "epoch": 0.12862854163045367,
1317
+ "grad_norm": 0.3837830352665476,
1318
+ "learning_rate": 0.00012847222222222223,
1319
+ "loss": 0.6938,
1320
+ "step": 370
1321
+ },
1322
+ {
1323
+ "epoch": 0.12932383104467235,
1324
+ "grad_norm": 0.6378624076702905,
1325
+ "learning_rate": 0.00012916666666666667,
1326
+ "loss": 0.6433,
1327
+ "step": 372
1328
+ },
1329
+ {
1330
+ "epoch": 0.13001912045889102,
1331
+ "grad_norm": 0.41344403509072675,
1332
+ "learning_rate": 0.0001298611111111111,
1333
+ "loss": 0.6892,
1334
+ "step": 374
1335
+ },
1336
+ {
1337
+ "epoch": 0.13071440987310967,
1338
+ "grad_norm": 1.3428471718098582,
1339
+ "learning_rate": 0.00013055555555555555,
1340
+ "loss": 0.7793,
1341
+ "step": 376
1342
+ },
1343
+ {
1344
+ "epoch": 0.13140969928732835,
1345
+ "grad_norm": 0.6202275876483008,
1346
+ "learning_rate": 0.00013125000000000002,
1347
+ "loss": 0.6836,
1348
+ "step": 378
1349
+ },
1350
+ {
1351
+ "epoch": 0.13210498870154702,
1352
+ "grad_norm": 0.774014383396004,
1353
+ "learning_rate": 0.00013194444444444446,
1354
+ "loss": 0.701,
1355
+ "step": 380
1356
+ },
1357
+ {
1358
+ "epoch": 0.1328002781157657,
1359
+ "grad_norm": 1.0650744092292224,
1360
+ "learning_rate": 0.0001326388888888889,
1361
+ "loss": 0.6562,
1362
+ "step": 382
1363
+ },
1364
+ {
1365
+ "epoch": 0.13349556752998434,
1366
+ "grad_norm": 0.5540168492927294,
1367
+ "learning_rate": 0.00013333333333333334,
1368
+ "loss": 0.6267,
1369
+ "step": 384
1370
+ },
1371
+ {
1372
+ "epoch": 0.13419085694420302,
1373
+ "grad_norm": 0.6403086140920178,
1374
+ "learning_rate": 0.00013402777777777778,
1375
+ "loss": 0.6268,
1376
+ "step": 386
1377
+ },
1378
+ {
1379
+ "epoch": 0.1348861463584217,
1380
+ "grad_norm": 0.9197218298114509,
1381
+ "learning_rate": 0.00013472222222222225,
1382
+ "loss": 0.7208,
1383
+ "step": 388
1384
+ },
1385
+ {
1386
+ "epoch": 0.13558143577264037,
1387
+ "grad_norm": 0.4235387152144674,
1388
+ "learning_rate": 0.0001354166666666667,
1389
+ "loss": 0.6057,
1390
+ "step": 390
1391
+ },
1392
+ {
1393
+ "epoch": 0.13627672518685904,
1394
+ "grad_norm": 0.425648636635043,
1395
+ "learning_rate": 0.00013611111111111113,
1396
+ "loss": 0.6387,
1397
+ "step": 392
1398
+ },
1399
+ {
1400
+ "epoch": 0.1369720146010777,
1401
+ "grad_norm": 1.034952396366508,
1402
+ "learning_rate": 0.00013680555555555557,
1403
+ "loss": 0.7307,
1404
+ "step": 394
1405
+ },
1406
+ {
1407
+ "epoch": 0.13766730401529637,
1408
+ "grad_norm": 0.8865505732510678,
1409
+ "learning_rate": 0.0001375,
1410
+ "loss": 0.6547,
1411
+ "step": 396
1412
+ },
1413
+ {
1414
+ "epoch": 0.13836259342951504,
1415
+ "grad_norm": 0.6490094573088415,
1416
+ "learning_rate": 0.00013819444444444445,
1417
+ "loss": 0.6909,
1418
+ "step": 398
1419
+ },
1420
+ {
1421
+ "epoch": 0.13905788284373372,
1422
+ "grad_norm": 0.8592713330921486,
1423
+ "learning_rate": 0.0001388888888888889,
1424
+ "loss": 0.6442,
1425
+ "step": 400
1426
+ },
1427
+ {
1428
+ "epoch": 0.13975317225795236,
1429
+ "grad_norm": 1.0279375953862069,
1430
+ "learning_rate": 0.00013958333333333333,
1431
+ "loss": 0.6907,
1432
+ "step": 402
1433
+ },
1434
+ {
1435
+ "epoch": 0.14044846167217104,
1436
+ "grad_norm": 0.6151057351983696,
1437
+ "learning_rate": 0.00014027777777777777,
1438
+ "loss": 0.697,
1439
+ "step": 404
1440
+ },
1441
+ {
1442
+ "epoch": 0.1411437510863897,
1443
+ "grad_norm": 0.7417879717121144,
1444
+ "learning_rate": 0.00014097222222222224,
1445
+ "loss": 0.698,
1446
+ "step": 406
1447
+ },
1448
+ {
1449
+ "epoch": 0.1418390405006084,
1450
+ "grad_norm": 0.5161453091859882,
1451
+ "learning_rate": 0.00014166666666666668,
1452
+ "loss": 0.6915,
1453
+ "step": 408
1454
+ },
1455
+ {
1456
+ "epoch": 0.14253432991482703,
1457
+ "grad_norm": 0.5667521096080546,
1458
+ "learning_rate": 0.00014236111111111112,
1459
+ "loss": 0.6609,
1460
+ "step": 410
1461
+ },
1462
+ {
1463
+ "epoch": 0.1432296193290457,
1464
+ "grad_norm": 0.7560317583703429,
1465
+ "learning_rate": 0.00014305555555555556,
1466
+ "loss": 0.6392,
1467
+ "step": 412
1468
+ },
1469
+ {
1470
+ "epoch": 0.14392490874326438,
1471
+ "grad_norm": 0.5456303981546313,
1472
+ "learning_rate": 0.00014375,
1473
+ "loss": 0.7561,
1474
+ "step": 414
1475
+ },
1476
+ {
1477
+ "epoch": 0.14462019815748306,
1478
+ "grad_norm": 0.4218469018151631,
1479
+ "learning_rate": 0.00014444444444444444,
1480
+ "loss": 0.675,
1481
+ "step": 416
1482
+ },
1483
+ {
1484
+ "epoch": 0.14531548757170173,
1485
+ "grad_norm": 0.3790517974518404,
1486
+ "learning_rate": 0.00014513888888888888,
1487
+ "loss": 0.6697,
1488
+ "step": 418
1489
+ },
1490
+ {
1491
+ "epoch": 0.14601077698592038,
1492
+ "grad_norm": 0.36495171567914964,
1493
+ "learning_rate": 0.00014583333333333335,
1494
+ "loss": 0.6467,
1495
+ "step": 420
1496
+ },
1497
+ {
1498
+ "epoch": 0.14670606640013906,
1499
+ "grad_norm": 0.3793520501177419,
1500
+ "learning_rate": 0.00014652777777777779,
1501
+ "loss": 0.6234,
1502
+ "step": 422
1503
+ },
1504
+ {
1505
+ "epoch": 0.14740135581435773,
1506
+ "grad_norm": 0.6214905371013544,
1507
+ "learning_rate": 0.00014722222222222223,
1508
+ "loss": 0.64,
1509
+ "step": 424
1510
+ },
1511
+ {
1512
+ "epoch": 0.1480966452285764,
1513
+ "grad_norm": 0.4103438113660832,
1514
+ "learning_rate": 0.0001479166666666667,
1515
+ "loss": 0.6174,
1516
+ "step": 426
1517
+ },
1518
+ {
1519
+ "epoch": 0.14879193464279505,
1520
+ "grad_norm": 0.5802311029669485,
1521
+ "learning_rate": 0.00014861111111111113,
1522
+ "loss": 0.6941,
1523
+ "step": 428
1524
+ },
1525
+ {
1526
+ "epoch": 0.14948722405701373,
1527
+ "grad_norm": 0.4876572424583591,
1528
+ "learning_rate": 0.00014930555555555557,
1529
+ "loss": 0.6178,
1530
+ "step": 430
1531
+ },
1532
+ {
1533
+ "epoch": 0.1501825134712324,
1534
+ "grad_norm": 0.6252220373472688,
1535
+ "learning_rate": 0.00015000000000000001,
1536
+ "loss": 0.692,
1537
+ "step": 432
1538
+ },
1539
+ {
1540
+ "epoch": 0.15087780288545108,
1541
+ "grad_norm": 1.516758455176553,
1542
+ "learning_rate": 0.00015069444444444445,
1543
+ "loss": 0.6812,
1544
+ "step": 434
1545
+ },
1546
+ {
1547
+ "epoch": 0.15157309229966973,
1548
+ "grad_norm": 0.48294685446689867,
1549
+ "learning_rate": 0.0001513888888888889,
1550
+ "loss": 0.7085,
1551
+ "step": 436
1552
+ },
1553
+ {
1554
+ "epoch": 0.1522683817138884,
1555
+ "grad_norm": 0.414196721150167,
1556
+ "learning_rate": 0.00015208333333333333,
1557
+ "loss": 0.673,
1558
+ "step": 438
1559
+ },
1560
+ {
1561
+ "epoch": 0.15296367112810708,
1562
+ "grad_norm": 0.7294881062503469,
1563
+ "learning_rate": 0.00015277777777777777,
1564
+ "loss": 0.6415,
1565
+ "step": 440
1566
+ },
1567
+ {
1568
+ "epoch": 0.15365896054232575,
1569
+ "grad_norm": 0.4846932912054438,
1570
+ "learning_rate": 0.00015347222222222224,
1571
+ "loss": 0.6644,
1572
+ "step": 442
1573
+ },
1574
+ {
1575
+ "epoch": 0.1543542499565444,
1576
+ "grad_norm": 0.6067400167426414,
1577
+ "learning_rate": 0.00015416666666666668,
1578
+ "loss": 0.6816,
1579
+ "step": 444
1580
+ },
1581
+ {
1582
+ "epoch": 0.15504953937076307,
1583
+ "grad_norm": 0.5586278026791864,
1584
+ "learning_rate": 0.00015486111111111112,
1585
+ "loss": 0.6223,
1586
+ "step": 446
1587
+ },
1588
+ {
1589
+ "epoch": 0.15574482878498175,
1590
+ "grad_norm": 0.3947236166856149,
1591
+ "learning_rate": 0.00015555555555555556,
1592
+ "loss": 0.7297,
1593
+ "step": 448
1594
+ },
1595
+ {
1596
+ "epoch": 0.15644011819920042,
1597
+ "grad_norm": 0.371457756887947,
1598
+ "learning_rate": 0.00015625,
1599
+ "loss": 0.6099,
1600
+ "step": 450
1601
+ },
1602
+ {
1603
+ "epoch": 0.15644011819920042,
1604
+ "eval_loss": 0.6634477376937866,
1605
+ "eval_runtime": 706.8027,
1606
+ "eval_samples_per_second": 6.851,
1607
+ "eval_steps_per_second": 0.215,
1608
+ "step": 450
1609
+ },
1610
+ {
1611
+ "epoch": 0.1571354076134191,
1612
+ "grad_norm": 0.4157928192338373,
1613
+ "learning_rate": 0.00015694444444444444,
1614
+ "loss": 0.7163,
1615
+ "step": 452
1616
+ },
1617
+ {
1618
+ "epoch": 0.15783069702763775,
1619
+ "grad_norm": 0.6318513446857751,
1620
+ "learning_rate": 0.00015763888888888888,
1621
+ "loss": 0.658,
1622
+ "step": 454
1623
+ },
1624
+ {
1625
+ "epoch": 0.15852598644185642,
1626
+ "grad_norm": 0.3188170086808347,
1627
+ "learning_rate": 0.00015833333333333332,
1628
+ "loss": 0.5966,
1629
+ "step": 456
1630
+ },
1631
+ {
1632
+ "epoch": 0.1592212758560751,
1633
+ "grad_norm": 0.4873146052629784,
1634
+ "learning_rate": 0.00015902777777777776,
1635
+ "loss": 0.687,
1636
+ "step": 458
1637
+ },
1638
+ {
1639
+ "epoch": 0.15991656527029377,
1640
+ "grad_norm": 0.3939618900754228,
1641
+ "learning_rate": 0.00015972222222222223,
1642
+ "loss": 0.6704,
1643
+ "step": 460
1644
+ },
1645
+ {
1646
+ "epoch": 0.16061185468451242,
1647
+ "grad_norm": 0.43442241310529234,
1648
+ "learning_rate": 0.00016041666666666667,
1649
+ "loss": 0.6941,
1650
+ "step": 462
1651
+ },
1652
+ {
1653
+ "epoch": 0.1613071440987311,
1654
+ "grad_norm": 0.5944395304786771,
1655
+ "learning_rate": 0.0001611111111111111,
1656
+ "loss": 0.5934,
1657
+ "step": 464
1658
+ },
1659
+ {
1660
+ "epoch": 0.16200243351294977,
1661
+ "grad_norm": 0.6186667715273749,
1662
+ "learning_rate": 0.00016180555555555555,
1663
+ "loss": 0.6604,
1664
+ "step": 466
1665
+ },
1666
+ {
1667
+ "epoch": 0.16269772292716844,
1668
+ "grad_norm": 0.8945416923537124,
1669
+ "learning_rate": 0.00016250000000000002,
1670
+ "loss": 0.6224,
1671
+ "step": 468
1672
+ },
1673
+ {
1674
+ "epoch": 0.1633930123413871,
1675
+ "grad_norm": 1.0526200270890014,
1676
+ "learning_rate": 0.00016319444444444446,
1677
+ "loss": 0.616,
1678
+ "step": 470
1679
+ },
1680
+ {
1681
+ "epoch": 0.16408830175560576,
1682
+ "grad_norm": 0.4575985966727194,
1683
+ "learning_rate": 0.0001638888888888889,
1684
+ "loss": 0.6591,
1685
+ "step": 472
1686
+ },
1687
+ {
1688
+ "epoch": 0.16478359116982444,
1689
+ "grad_norm": 0.4499931204645402,
1690
+ "learning_rate": 0.00016458333333333334,
1691
+ "loss": 0.6644,
1692
+ "step": 474
1693
+ },
1694
+ {
1695
+ "epoch": 0.16547888058404311,
1696
+ "grad_norm": 0.8365574384741885,
1697
+ "learning_rate": 0.00016527777777777778,
1698
+ "loss": 0.6503,
1699
+ "step": 476
1700
+ },
1701
+ {
1702
+ "epoch": 0.1661741699982618,
1703
+ "grad_norm": 0.5265284650577726,
1704
+ "learning_rate": 0.00016597222222222225,
1705
+ "loss": 0.557,
1706
+ "step": 478
1707
+ },
1708
+ {
1709
+ "epoch": 0.16686945941248044,
1710
+ "grad_norm": 0.6004742828917694,
1711
+ "learning_rate": 0.0001666666666666667,
1712
+ "loss": 0.696,
1713
+ "step": 480
1714
+ },
1715
+ {
1716
+ "epoch": 0.1675647488266991,
1717
+ "grad_norm": 0.8653134209549845,
1718
+ "learning_rate": 0.00016736111111111113,
1719
+ "loss": 0.6544,
1720
+ "step": 482
1721
+ },
1722
+ {
1723
+ "epoch": 0.1682600382409178,
1724
+ "grad_norm": 1.033230873418064,
1725
+ "learning_rate": 0.00016805555555555557,
1726
+ "loss": 0.641,
1727
+ "step": 484
1728
+ },
1729
+ {
1730
+ "epoch": 0.16895532765513646,
1731
+ "grad_norm": 0.45457193405665913,
1732
+ "learning_rate": 0.00016875,
1733
+ "loss": 0.5952,
1734
+ "step": 486
1735
+ },
1736
+ {
1737
+ "epoch": 0.1696506170693551,
1738
+ "grad_norm": 0.7413902971651539,
1739
+ "learning_rate": 0.00016944444444444445,
1740
+ "loss": 0.6306,
1741
+ "step": 488
1742
+ },
1743
+ {
1744
+ "epoch": 0.17034590648357378,
1745
+ "grad_norm": 0.5808764628223875,
1746
+ "learning_rate": 0.0001701388888888889,
1747
+ "loss": 0.6714,
1748
+ "step": 490
1749
+ },
1750
+ {
1751
+ "epoch": 0.17104119589779246,
1752
+ "grad_norm": 0.42505370383777924,
1753
+ "learning_rate": 0.00017083333333333333,
1754
+ "loss": 0.6432,
1755
+ "step": 492
1756
+ },
1757
+ {
1758
+ "epoch": 0.17173648531201113,
1759
+ "grad_norm": 0.9403542369255028,
1760
+ "learning_rate": 0.00017152777777777777,
1761
+ "loss": 0.626,
1762
+ "step": 494
1763
+ },
1764
+ {
1765
+ "epoch": 0.17243177472622978,
1766
+ "grad_norm": 0.41714707593148775,
1767
+ "learning_rate": 0.00017222222222222224,
1768
+ "loss": 0.6132,
1769
+ "step": 496
1770
+ },
1771
+ {
1772
+ "epoch": 0.17312706414044846,
1773
+ "grad_norm": 0.6660896849726371,
1774
+ "learning_rate": 0.00017291666666666668,
1775
+ "loss": 0.7009,
1776
+ "step": 498
1777
+ },
1778
+ {
1779
+ "epoch": 0.17382235355466713,
1780
+ "grad_norm": 0.5079993412341118,
1781
+ "learning_rate": 0.00017361111111111112,
1782
+ "loss": 0.6808,
1783
+ "step": 500
1784
+ },
1785
+ {
1786
+ "epoch": 0.1745176429688858,
1787
+ "grad_norm": 0.3426487947129772,
1788
+ "learning_rate": 0.00017430555555555556,
1789
+ "loss": 0.687,
1790
+ "step": 502
1791
+ },
1792
+ {
1793
+ "epoch": 0.17521293238310448,
1794
+ "grad_norm": 0.7316051797581208,
1795
+ "learning_rate": 0.000175,
1796
+ "loss": 0.686,
1797
+ "step": 504
1798
+ },
1799
+ {
1800
+ "epoch": 0.17590822179732313,
1801
+ "grad_norm": 0.4619785653282459,
1802
+ "learning_rate": 0.00017569444444444444,
1803
+ "loss": 0.7019,
1804
+ "step": 506
1805
+ },
1806
+ {
1807
+ "epoch": 0.1766035112115418,
1808
+ "grad_norm": 0.47786977912472967,
1809
+ "learning_rate": 0.0001763888888888889,
1810
+ "loss": 0.7097,
1811
+ "step": 508
1812
+ },
1813
+ {
1814
+ "epoch": 0.17729880062576048,
1815
+ "grad_norm": 0.4204398291864812,
1816
+ "learning_rate": 0.00017708333333333335,
1817
+ "loss": 0.6715,
1818
+ "step": 510
1819
+ },
1820
+ {
1821
+ "epoch": 0.17799409003997915,
1822
+ "grad_norm": 0.5257761541709909,
1823
+ "learning_rate": 0.00017777777777777779,
1824
+ "loss": 0.6675,
1825
+ "step": 512
1826
+ },
1827
+ {
1828
+ "epoch": 0.1786893794541978,
1829
+ "grad_norm": 0.5752239420884688,
1830
+ "learning_rate": 0.00017847222222222225,
1831
+ "loss": 0.5847,
1832
+ "step": 514
1833
+ },
1834
+ {
1835
+ "epoch": 0.17938466886841647,
1836
+ "grad_norm": 0.5611432017880666,
1837
+ "learning_rate": 0.0001791666666666667,
1838
+ "loss": 0.5978,
1839
+ "step": 516
1840
+ },
1841
+ {
1842
+ "epoch": 0.18007995828263515,
1843
+ "grad_norm": 0.5147097358785945,
1844
+ "learning_rate": 0.00017986111111111113,
1845
+ "loss": 0.641,
1846
+ "step": 518
1847
+ },
1848
+ {
1849
+ "epoch": 0.18077524769685382,
1850
+ "grad_norm": 0.4387330962078066,
1851
+ "learning_rate": 0.00018055555555555557,
1852
+ "loss": 0.5771,
1853
+ "step": 520
1854
+ },
1855
+ {
1856
+ "epoch": 0.18147053711107247,
1857
+ "grad_norm": 0.5712526440788663,
1858
+ "learning_rate": 0.00018125000000000001,
1859
+ "loss": 0.6212,
1860
+ "step": 522
1861
+ },
1862
+ {
1863
+ "epoch": 0.18216582652529115,
1864
+ "grad_norm": 0.6357133907029916,
1865
+ "learning_rate": 0.00018194444444444445,
1866
+ "loss": 0.5981,
1867
+ "step": 524
1868
+ },
1869
+ {
1870
+ "epoch": 0.18286111593950982,
1871
+ "grad_norm": 1.2129620643054042,
1872
+ "learning_rate": 0.0001826388888888889,
1873
+ "loss": 0.5848,
1874
+ "step": 526
1875
+ },
1876
+ {
1877
+ "epoch": 0.1835564053537285,
1878
+ "grad_norm": 1.651599276688714,
1879
+ "learning_rate": 0.00018333333333333334,
1880
+ "loss": 0.7089,
1881
+ "step": 528
1882
+ },
1883
+ {
1884
+ "epoch": 0.18425169476794717,
1885
+ "grad_norm": 0.9051688342434142,
1886
+ "learning_rate": 0.00018402777777777778,
1887
+ "loss": 0.7255,
1888
+ "step": 530
1889
+ },
1890
+ {
1891
+ "epoch": 0.18494698418216582,
1892
+ "grad_norm": 0.6932441010863684,
1893
+ "learning_rate": 0.00018472222222222224,
1894
+ "loss": 0.6497,
1895
+ "step": 532
1896
+ },
1897
+ {
1898
+ "epoch": 0.1856422735963845,
1899
+ "grad_norm": 1.2163006408955903,
1900
+ "learning_rate": 0.00018541666666666668,
1901
+ "loss": 0.7585,
1902
+ "step": 534
1903
+ },
1904
+ {
1905
+ "epoch": 0.18633756301060317,
1906
+ "grad_norm": 0.4387074048377486,
1907
+ "learning_rate": 0.00018611111111111112,
1908
+ "loss": 0.6541,
1909
+ "step": 536
1910
+ },
1911
+ {
1912
+ "epoch": 0.18703285242482184,
1913
+ "grad_norm": 0.49430581216479236,
1914
+ "learning_rate": 0.00018680555555555556,
1915
+ "loss": 0.6489,
1916
+ "step": 538
1917
+ },
1918
+ {
1919
+ "epoch": 0.1877281418390405,
1920
+ "grad_norm": 0.47561442420926275,
1921
+ "learning_rate": 0.0001875,
1922
+ "loss": 0.6406,
1923
+ "step": 540
1924
+ },
1925
+ {
1926
+ "epoch": 0.18842343125325917,
1927
+ "grad_norm": 0.7046092923664611,
1928
+ "learning_rate": 0.00018819444444444444,
1929
+ "loss": 0.6099,
1930
+ "step": 542
1931
+ },
1932
+ {
1933
+ "epoch": 0.18911872066747784,
1934
+ "grad_norm": 0.46256136269201026,
1935
+ "learning_rate": 0.00018888888888888888,
1936
+ "loss": 0.6992,
1937
+ "step": 544
1938
+ },
1939
+ {
1940
+ "epoch": 0.18981401008169652,
1941
+ "grad_norm": 0.5489877082982352,
1942
+ "learning_rate": 0.00018958333333333332,
1943
+ "loss": 0.5817,
1944
+ "step": 546
1945
+ },
1946
+ {
1947
+ "epoch": 0.19050929949591516,
1948
+ "grad_norm": 0.46129441798908893,
1949
+ "learning_rate": 0.00019027777777777776,
1950
+ "loss": 0.645,
1951
+ "step": 548
1952
+ },
1953
+ {
1954
+ "epoch": 0.19120458891013384,
1955
+ "grad_norm": 0.3724226655450749,
1956
+ "learning_rate": 0.00019097222222222223,
1957
+ "loss": 0.6177,
1958
+ "step": 550
1959
+ },
1960
+ {
1961
+ "epoch": 0.1918998783243525,
1962
+ "grad_norm": 0.4625796323902992,
1963
+ "learning_rate": 0.00019166666666666667,
1964
+ "loss": 0.6702,
1965
+ "step": 552
1966
+ },
1967
+ {
1968
+ "epoch": 0.1925951677385712,
1969
+ "grad_norm": 1.0142124821047231,
1970
+ "learning_rate": 0.0001923611111111111,
1971
+ "loss": 0.7173,
1972
+ "step": 554
1973
+ },
1974
+ {
1975
+ "epoch": 0.19329045715278984,
1976
+ "grad_norm": 0.5452830720753045,
1977
+ "learning_rate": 0.00019305555555555558,
1978
+ "loss": 0.6897,
1979
+ "step": 556
1980
+ },
1981
+ {
1982
+ "epoch": 0.1939857465670085,
1983
+ "grad_norm": 0.5723214484723252,
1984
+ "learning_rate": 0.00019375000000000002,
1985
+ "loss": 0.6495,
1986
+ "step": 558
1987
+ },
1988
+ {
1989
+ "epoch": 0.19468103598122719,
1990
+ "grad_norm": 0.39925844595054966,
1991
+ "learning_rate": 0.00019444444444444446,
1992
+ "loss": 0.6479,
1993
+ "step": 560
1994
+ },
1995
+ {
1996
+ "epoch": 0.19537632539544586,
1997
+ "grad_norm": 0.3575565088751118,
1998
+ "learning_rate": 0.0001951388888888889,
1999
+ "loss": 0.6593,
2000
+ "step": 562
2001
+ },
2002
+ {
2003
+ "epoch": 0.19607161480966454,
2004
+ "grad_norm": 0.6119892689536569,
2005
+ "learning_rate": 0.00019583333333333334,
2006
+ "loss": 0.6831,
2007
+ "step": 564
2008
+ },
2009
+ {
2010
+ "epoch": 0.19676690422388318,
2011
+ "grad_norm": 0.5076569073121309,
2012
+ "learning_rate": 0.00019652777777777778,
2013
+ "loss": 0.6294,
2014
+ "step": 566
2015
+ },
2016
+ {
2017
+ "epoch": 0.19746219363810186,
2018
+ "grad_norm": 0.622226505121079,
2019
+ "learning_rate": 0.00019722222222222225,
2020
+ "loss": 0.5551,
2021
+ "step": 568
2022
+ },
2023
+ {
2024
+ "epoch": 0.19815748305232053,
2025
+ "grad_norm": 0.4106526912254415,
2026
+ "learning_rate": 0.0001979166666666667,
2027
+ "loss": 0.6503,
2028
+ "step": 570
2029
+ },
2030
+ {
2031
+ "epoch": 0.1988527724665392,
2032
+ "grad_norm": 0.47164751383243125,
2033
+ "learning_rate": 0.00019861111111111113,
2034
+ "loss": 0.6992,
2035
+ "step": 572
2036
+ },
2037
+ {
2038
+ "epoch": 0.19954806188075785,
2039
+ "grad_norm": 0.492328312395826,
2040
+ "learning_rate": 0.00019930555555555557,
2041
+ "loss": 0.7183,
2042
+ "step": 574
2043
+ },
2044
+ {
2045
+ "epoch": 0.20024335129497653,
2046
+ "grad_norm": 0.5087240677439067,
2047
+ "learning_rate": 0.0002,
2048
+ "loss": 0.6429,
2049
+ "step": 576
2050
+ },
2051
+ {
2052
+ "epoch": 0.2009386407091952,
2053
+ "grad_norm": 0.9311216098504759,
2054
+ "learning_rate": 0.00019999992632143608,
2055
+ "loss": 0.6586,
2056
+ "step": 578
2057
+ },
2058
+ {
2059
+ "epoch": 0.20163393012341388,
2060
+ "grad_norm": 0.38840935170189844,
2061
+ "learning_rate": 0.00019999970528585288,
2062
+ "loss": 0.5905,
2063
+ "step": 580
2064
+ },
2065
+ {
2066
+ "epoch": 0.20232921953763253,
2067
+ "grad_norm": 0.5966472765038273,
2068
+ "learning_rate": 0.0001999993368935761,
2069
+ "loss": 0.7112,
2070
+ "step": 582
2071
+ },
2072
+ {
2073
+ "epoch": 0.2030245089518512,
2074
+ "grad_norm": 0.535232520705813,
2075
+ "learning_rate": 0.00019999882114514863,
2076
+ "loss": 0.7036,
2077
+ "step": 584
2078
+ },
2079
+ {
2080
+ "epoch": 0.20371979836606988,
2081
+ "grad_norm": 0.5039916125175515,
2082
+ "learning_rate": 0.0001999981580413304,
2083
+ "loss": 0.7485,
2084
+ "step": 586
2085
+ },
2086
+ {
2087
+ "epoch": 0.20441508778028855,
2088
+ "grad_norm": 0.6733262892097686,
2089
+ "learning_rate": 0.0001999973475830986,
2090
+ "loss": 0.7312,
2091
+ "step": 588
2092
+ },
2093
+ {
2094
+ "epoch": 0.20511037719450723,
2095
+ "grad_norm": 0.42805740684614807,
2096
+ "learning_rate": 0.00019999638977164747,
2097
+ "loss": 0.6356,
2098
+ "step": 590
2099
+ },
2100
+ {
2101
+ "epoch": 0.20580566660872587,
2102
+ "grad_norm": 0.976918546573707,
2103
+ "learning_rate": 0.00019999528460838844,
2104
+ "loss": 0.666,
2105
+ "step": 592
2106
+ },
2107
+ {
2108
+ "epoch": 0.20650095602294455,
2109
+ "grad_norm": 0.674463735133944,
2110
+ "learning_rate": 0.00019999403209495,
2111
+ "loss": 0.6735,
2112
+ "step": 594
2113
+ },
2114
+ {
2115
+ "epoch": 0.20719624543716322,
2116
+ "grad_norm": 0.412232119296174,
2117
+ "learning_rate": 0.00019999263223317786,
2118
+ "loss": 0.6364,
2119
+ "step": 596
2120
+ },
2121
+ {
2122
+ "epoch": 0.2078915348513819,
2123
+ "grad_norm": 0.3611034413524545,
2124
+ "learning_rate": 0.0001999910850251348,
2125
+ "loss": 0.6368,
2126
+ "step": 598
2127
+ },
2128
+ {
2129
+ "epoch": 0.20858682426560055,
2130
+ "grad_norm": 0.542506398091656,
2131
+ "learning_rate": 0.0001999893904731007,
2132
+ "loss": 0.7292,
2133
+ "step": 600
2134
+ },
2135
+ {
2136
+ "epoch": 0.20858682426560055,
2137
+ "eval_loss": 0.6517693400382996,
2138
+ "eval_runtime": 707.4674,
2139
+ "eval_samples_per_second": 6.844,
2140
+ "eval_steps_per_second": 0.215,
2141
+ "step": 600
2142
+ },
2143
+ {
2144
+ "epoch": 0.20928211367981922,
2145
+ "grad_norm": 0.4799286290563759,
2146
+ "learning_rate": 0.0001999875485795727,
2147
+ "loss": 0.6609,
2148
+ "step": 602
2149
+ },
2150
+ {
2151
+ "epoch": 0.2099774030940379,
2152
+ "grad_norm": 0.41830446575524177,
2153
+ "learning_rate": 0.0001999855593472649,
2154
+ "loss": 0.6084,
2155
+ "step": 604
2156
+ },
2157
+ {
2158
+ "epoch": 0.21067269250825657,
2159
+ "grad_norm": 0.8865562086549428,
2160
+ "learning_rate": 0.00019998342277910856,
2161
+ "loss": 0.6204,
2162
+ "step": 606
2163
+ },
2164
+ {
2165
+ "epoch": 0.21136798192247522,
2166
+ "grad_norm": 0.5567410910026902,
2167
+ "learning_rate": 0.00019998113887825206,
2168
+ "loss": 0.7087,
2169
+ "step": 608
2170
+ },
2171
+ {
2172
+ "epoch": 0.2120632713366939,
2173
+ "grad_norm": 0.4229098163428897,
2174
+ "learning_rate": 0.000199978707648061,
2175
+ "loss": 0.6403,
2176
+ "step": 610
2177
+ },
2178
+ {
2179
+ "epoch": 0.21275856075091257,
2180
+ "grad_norm": 0.7793555918286379,
2181
+ "learning_rate": 0.00019997612909211784,
2182
+ "loss": 0.6147,
2183
+ "step": 612
2184
+ },
2185
+ {
2186
+ "epoch": 0.21345385016513124,
2187
+ "grad_norm": 0.49302383416102263,
2188
+ "learning_rate": 0.00019997340321422228,
2189
+ "loss": 0.5771,
2190
+ "step": 614
2191
+ },
2192
+ {
2193
+ "epoch": 0.21414913957934992,
2194
+ "grad_norm": 1.0807551778698754,
2195
+ "learning_rate": 0.00019997053001839115,
2196
+ "loss": 0.6538,
2197
+ "step": 616
2198
+ },
2199
+ {
2200
+ "epoch": 0.21484442899356856,
2201
+ "grad_norm": 0.5935042706034646,
2202
+ "learning_rate": 0.0001999675095088583,
2203
+ "loss": 0.6058,
2204
+ "step": 618
2205
+ },
2206
+ {
2207
+ "epoch": 0.21553971840778724,
2208
+ "grad_norm": 0.5151203960605611,
2209
+ "learning_rate": 0.00019996434169007468,
2210
+ "loss": 0.6138,
2211
+ "step": 620
2212
+ },
2213
+ {
2214
+ "epoch": 0.21623500782200591,
2215
+ "grad_norm": 0.7837579499467073,
2216
+ "learning_rate": 0.00019996102656670824,
2217
+ "loss": 0.657,
2218
+ "step": 622
2219
+ },
2220
+ {
2221
+ "epoch": 0.2169302972362246,
2222
+ "grad_norm": 0.5438202413527335,
2223
+ "learning_rate": 0.0001999575641436441,
2224
+ "loss": 0.6642,
2225
+ "step": 624
2226
+ },
2227
+ {
2228
+ "epoch": 0.21762558665044324,
2229
+ "grad_norm": 0.5566641749580342,
2230
+ "learning_rate": 0.00019995395442598435,
2231
+ "loss": 0.6813,
2232
+ "step": 626
2233
+ },
2234
+ {
2235
+ "epoch": 0.2183208760646619,
2236
+ "grad_norm": 0.9345190838089675,
2237
+ "learning_rate": 0.0001999501974190482,
2238
+ "loss": 0.684,
2239
+ "step": 628
2240
+ },
2241
+ {
2242
+ "epoch": 0.2190161654788806,
2243
+ "grad_norm": 0.48677420225080537,
2244
+ "learning_rate": 0.00019994629312837186,
2245
+ "loss": 0.6481,
2246
+ "step": 630
2247
+ },
2248
+ {
2249
+ "epoch": 0.21971145489309926,
2250
+ "grad_norm": 1.3826373483976924,
2251
+ "learning_rate": 0.00019994224155970856,
2252
+ "loss": 0.7222,
2253
+ "step": 632
2254
+ },
2255
+ {
2256
+ "epoch": 0.2204067443073179,
2257
+ "grad_norm": 0.9031189580678588,
2258
+ "learning_rate": 0.00019993804271902857,
2259
+ "loss": 0.6809,
2260
+ "step": 634
2261
+ },
2262
+ {
2263
+ "epoch": 0.22110203372153658,
2264
+ "grad_norm": 0.5674519795217096,
2265
+ "learning_rate": 0.00019993369661251923,
2266
+ "loss": 0.6184,
2267
+ "step": 636
2268
+ },
2269
+ {
2270
+ "epoch": 0.22179732313575526,
2271
+ "grad_norm": 2.5294344265719135,
2272
+ "learning_rate": 0.0001999292032465848,
2273
+ "loss": 0.7686,
2274
+ "step": 638
2275
+ },
2276
+ {
2277
+ "epoch": 0.22249261254997393,
2278
+ "grad_norm": 2.8855656605632047,
2279
+ "learning_rate": 0.00019992456262784658,
2280
+ "loss": 0.8215,
2281
+ "step": 640
2282
+ },
2283
+ {
2284
+ "epoch": 0.2231879019641926,
2285
+ "grad_norm": 1.5331524159142917,
2286
+ "learning_rate": 0.00019991977476314286,
2287
+ "loss": 0.7057,
2288
+ "step": 642
2289
+ },
2290
+ {
2291
+ "epoch": 0.22388319137841126,
2292
+ "grad_norm": 0.8086145005161695,
2293
+ "learning_rate": 0.0001999148396595289,
2294
+ "loss": 0.6289,
2295
+ "step": 644
2296
+ },
2297
+ {
2298
+ "epoch": 0.22457848079262993,
2299
+ "grad_norm": 0.767389193699688,
2300
+ "learning_rate": 0.00019990975732427693,
2301
+ "loss": 0.6117,
2302
+ "step": 646
2303
+ },
2304
+ {
2305
+ "epoch": 0.2252737702068486,
2306
+ "grad_norm": 0.9275190697426144,
2307
+ "learning_rate": 0.0001999045277648761,
2308
+ "loss": 0.6582,
2309
+ "step": 648
2310
+ },
2311
+ {
2312
+ "epoch": 0.22596905962106728,
2313
+ "grad_norm": 0.8004230202091956,
2314
+ "learning_rate": 0.00019989915098903257,
2315
+ "loss": 0.5387,
2316
+ "step": 650
2317
+ },
2318
+ {
2319
+ "epoch": 0.22666434903528593,
2320
+ "grad_norm": 0.8210833181388095,
2321
+ "learning_rate": 0.0001998936270046694,
2322
+ "loss": 0.5941,
2323
+ "step": 652
2324
+ },
2325
+ {
2326
+ "epoch": 0.2273596384495046,
2327
+ "grad_norm": 2.1189675938584616,
2328
+ "learning_rate": 0.00019988795581992656,
2329
+ "loss": 0.7504,
2330
+ "step": 654
2331
+ },
2332
+ {
2333
+ "epoch": 0.22805492786372328,
2334
+ "grad_norm": 0.5952032026226816,
2335
+ "learning_rate": 0.000199882137443161,
2336
+ "loss": 0.6643,
2337
+ "step": 656
2338
+ },
2339
+ {
2340
+ "epoch": 0.22875021727794195,
2341
+ "grad_norm": 0.40481972159485846,
2342
+ "learning_rate": 0.00019987617188294642,
2343
+ "loss": 0.5225,
2344
+ "step": 658
2345
+ },
2346
+ {
2347
+ "epoch": 0.2294455066921606,
2348
+ "grad_norm": 0.7478677085785272,
2349
+ "learning_rate": 0.00019987005914807356,
2350
+ "loss": 0.6561,
2351
+ "step": 660
2352
+ },
2353
+ {
2354
+ "epoch": 0.23014079610637928,
2355
+ "grad_norm": 0.4468692254604696,
2356
+ "learning_rate": 0.00019986379924754997,
2357
+ "loss": 0.6262,
2358
+ "step": 662
2359
+ },
2360
+ {
2361
+ "epoch": 0.23083608552059795,
2362
+ "grad_norm": 0.46697718493106893,
2363
+ "learning_rate": 0.00019985739219060002,
2364
+ "loss": 0.6178,
2365
+ "step": 664
2366
+ },
2367
+ {
2368
+ "epoch": 0.23153137493481663,
2369
+ "grad_norm": 0.388728867903269,
2370
+ "learning_rate": 0.000199850837986665,
2371
+ "loss": 0.5544,
2372
+ "step": 666
2373
+ },
2374
+ {
2375
+ "epoch": 0.23222666434903527,
2376
+ "grad_norm": 0.5731403139849517,
2377
+ "learning_rate": 0.000199844136645403,
2378
+ "loss": 0.5906,
2379
+ "step": 668
2380
+ },
2381
+ {
2382
+ "epoch": 0.23292195376325395,
2383
+ "grad_norm": 0.7601215827849259,
2384
+ "learning_rate": 0.0001998372881766889,
2385
+ "loss": 0.6577,
2386
+ "step": 670
2387
+ },
2388
+ {
2389
+ "epoch": 0.23361724317747262,
2390
+ "grad_norm": 0.8997786413980279,
2391
+ "learning_rate": 0.00019983029259061446,
2392
+ "loss": 0.6388,
2393
+ "step": 672
2394
+ },
2395
+ {
2396
+ "epoch": 0.2343125325916913,
2397
+ "grad_norm": 0.519349430301096,
2398
+ "learning_rate": 0.00019982314989748813,
2399
+ "loss": 0.6561,
2400
+ "step": 674
2401
+ },
2402
+ {
2403
+ "epoch": 0.23500782200590997,
2404
+ "grad_norm": 0.4918913590728881,
2405
+ "learning_rate": 0.00019981586010783513,
2406
+ "loss": 0.6176,
2407
+ "step": 676
2408
+ },
2409
+ {
2410
+ "epoch": 0.23570311142012862,
2411
+ "grad_norm": 0.5324672662478441,
2412
+ "learning_rate": 0.00019980842323239756,
2413
+ "loss": 0.5801,
2414
+ "step": 678
2415
+ },
2416
+ {
2417
+ "epoch": 0.2363984008343473,
2418
+ "grad_norm": 0.6375745879180794,
2419
+ "learning_rate": 0.00019980083928213417,
2420
+ "loss": 0.6885,
2421
+ "step": 680
2422
+ },
2423
+ {
2424
+ "epoch": 0.23709369024856597,
2425
+ "grad_norm": 0.4317997282790841,
2426
+ "learning_rate": 0.00019979310826822046,
2427
+ "loss": 0.5727,
2428
+ "step": 682
2429
+ },
2430
+ {
2431
+ "epoch": 0.23778897966278464,
2432
+ "grad_norm": 0.7933828103238656,
2433
+ "learning_rate": 0.00019978523020204857,
2434
+ "loss": 0.6703,
2435
+ "step": 684
2436
+ },
2437
+ {
2438
+ "epoch": 0.2384842690770033,
2439
+ "grad_norm": 0.5465102255190097,
2440
+ "learning_rate": 0.00019977720509522747,
2441
+ "loss": 0.6675,
2442
+ "step": 686
2443
+ },
2444
+ {
2445
+ "epoch": 0.23917955849122197,
2446
+ "grad_norm": 0.8356137363612883,
2447
+ "learning_rate": 0.00019976903295958269,
2448
+ "loss": 0.6558,
2449
+ "step": 688
2450
+ },
2451
+ {
2452
+ "epoch": 0.23987484790544064,
2453
+ "grad_norm": 0.5190073187676806,
2454
+ "learning_rate": 0.00019976071380715645,
2455
+ "loss": 0.6779,
2456
+ "step": 690
2457
+ },
2458
+ {
2459
+ "epoch": 0.24057013731965932,
2460
+ "grad_norm": 0.40561527338007225,
2461
+ "learning_rate": 0.0001997522476502076,
2462
+ "loss": 0.6687,
2463
+ "step": 692
2464
+ },
2465
+ {
2466
+ "epoch": 0.24126542673387796,
2467
+ "grad_norm": 0.592821589706298,
2468
+ "learning_rate": 0.0001997436345012117,
2469
+ "loss": 0.6807,
2470
+ "step": 694
2471
+ },
2472
+ {
2473
+ "epoch": 0.24196071614809664,
2474
+ "grad_norm": 0.686918345062074,
2475
+ "learning_rate": 0.00019973487437286073,
2476
+ "loss": 0.6831,
2477
+ "step": 696
2478
+ },
2479
+ {
2480
+ "epoch": 0.2426560055623153,
2481
+ "grad_norm": 0.964075508243471,
2482
+ "learning_rate": 0.00019972596727806346,
2483
+ "loss": 0.665,
2484
+ "step": 698
2485
+ },
2486
+ {
2487
+ "epoch": 0.243351294976534,
2488
+ "grad_norm": 0.739137434656244,
2489
+ "learning_rate": 0.00019971691322994507,
2490
+ "loss": 0.6387,
2491
+ "step": 700
2492
+ },
2493
+ {
2494
+ "epoch": 0.24404658439075266,
2495
+ "grad_norm": 0.3978099419540589,
2496
+ "learning_rate": 0.00019970771224184737,
2497
+ "loss": 0.6143,
2498
+ "step": 702
2499
+ },
2500
+ {
2501
+ "epoch": 0.2447418738049713,
2502
+ "grad_norm": 0.4997978607153984,
2503
+ "learning_rate": 0.0001996983643273287,
2504
+ "loss": 0.6083,
2505
+ "step": 704
2506
+ },
2507
+ {
2508
+ "epoch": 0.24543716321918999,
2509
+ "grad_norm": 1.3443518380439796,
2510
+ "learning_rate": 0.0001996888695001638,
2511
+ "loss": 0.6711,
2512
+ "step": 706
2513
+ },
2514
+ {
2515
+ "epoch": 0.24613245263340866,
2516
+ "grad_norm": 0.5424379336505594,
2517
+ "learning_rate": 0.000199679227774344,
2518
+ "loss": 0.6321,
2519
+ "step": 708
2520
+ },
2521
+ {
2522
+ "epoch": 0.24682774204762734,
2523
+ "grad_norm": 0.7689235498835658,
2524
+ "learning_rate": 0.00019966943916407712,
2525
+ "loss": 0.6918,
2526
+ "step": 710
2527
+ },
2528
+ {
2529
+ "epoch": 0.24752303146184598,
2530
+ "grad_norm": 0.6467240394468301,
2531
+ "learning_rate": 0.00019965950368378734,
2532
+ "loss": 0.6368,
2533
+ "step": 712
2534
+ },
2535
+ {
2536
+ "epoch": 0.24821832087606466,
2537
+ "grad_norm": 0.48819289451999526,
2538
+ "learning_rate": 0.00019964942134811532,
2539
+ "loss": 0.5875,
2540
+ "step": 714
2541
+ },
2542
+ {
2543
+ "epoch": 0.24891361029028333,
2544
+ "grad_norm": 1.1929427463467126,
2545
+ "learning_rate": 0.00019963919217191807,
2546
+ "loss": 0.6795,
2547
+ "step": 716
2548
+ },
2549
+ {
2550
+ "epoch": 0.249608899704502,
2551
+ "grad_norm": 0.4182713825442439,
2552
+ "learning_rate": 0.00019962881617026902,
2553
+ "loss": 0.5835,
2554
+ "step": 718
2555
+ },
2556
+ {
2557
+ "epoch": 0.25030418911872065,
2558
+ "grad_norm": 0.5770262516296942,
2559
+ "learning_rate": 0.00019961829335845795,
2560
+ "loss": 0.6299,
2561
+ "step": 720
2562
+ },
2563
+ {
2564
+ "epoch": 0.25099947853293936,
2565
+ "grad_norm": 0.9135250550485389,
2566
+ "learning_rate": 0.00019960762375199095,
2567
+ "loss": 0.6844,
2568
+ "step": 722
2569
+ },
2570
+ {
2571
+ "epoch": 0.251694767947158,
2572
+ "grad_norm": 0.5511234805517417,
2573
+ "learning_rate": 0.0001995968073665905,
2574
+ "loss": 0.5745,
2575
+ "step": 724
2576
+ },
2577
+ {
2578
+ "epoch": 0.25239005736137665,
2579
+ "grad_norm": 0.49137872896877094,
2580
+ "learning_rate": 0.00019958584421819528,
2581
+ "loss": 0.698,
2582
+ "step": 726
2583
+ },
2584
+ {
2585
+ "epoch": 0.25308534677559535,
2586
+ "grad_norm": 0.4437253322782162,
2587
+ "learning_rate": 0.00019957473432296026,
2588
+ "loss": 0.6864,
2589
+ "step": 728
2590
+ },
2591
+ {
2592
+ "epoch": 0.253780636189814,
2593
+ "grad_norm": 0.5064381489074585,
2594
+ "learning_rate": 0.0001995634776972567,
2595
+ "loss": 0.5758,
2596
+ "step": 730
2597
+ },
2598
+ {
2599
+ "epoch": 0.2544759256040327,
2600
+ "grad_norm": 0.5464655664741616,
2601
+ "learning_rate": 0.00019955207435767201,
2602
+ "loss": 0.6351,
2603
+ "step": 732
2604
+ },
2605
+ {
2606
+ "epoch": 0.25517121501825135,
2607
+ "grad_norm": 0.44060524677853835,
2608
+ "learning_rate": 0.00019954052432100982,
2609
+ "loss": 0.6837,
2610
+ "step": 734
2611
+ },
2612
+ {
2613
+ "epoch": 0.25586650443247,
2614
+ "grad_norm": 0.942390696980061,
2615
+ "learning_rate": 0.00019952882760428998,
2616
+ "loss": 0.6378,
2617
+ "step": 736
2618
+ },
2619
+ {
2620
+ "epoch": 0.2565617938466887,
2621
+ "grad_norm": 0.4710023299938762,
2622
+ "learning_rate": 0.00019951698422474836,
2623
+ "loss": 0.6968,
2624
+ "step": 738
2625
+ },
2626
+ {
2627
+ "epoch": 0.25725708326090735,
2628
+ "grad_norm": 0.4179809222387651,
2629
+ "learning_rate": 0.00019950499419983707,
2630
+ "loss": 0.5643,
2631
+ "step": 740
2632
+ },
2633
+ {
2634
+ "epoch": 0.257952372675126,
2635
+ "grad_norm": 0.4446278865199289,
2636
+ "learning_rate": 0.00019949285754722426,
2637
+ "loss": 0.5089,
2638
+ "step": 742
2639
+ },
2640
+ {
2641
+ "epoch": 0.2586476620893447,
2642
+ "grad_norm": 0.914728851966275,
2643
+ "learning_rate": 0.00019948057428479418,
2644
+ "loss": 0.7933,
2645
+ "step": 744
2646
+ },
2647
+ {
2648
+ "epoch": 0.25934295150356335,
2649
+ "grad_norm": 0.47142275185055055,
2650
+ "learning_rate": 0.00019946814443064703,
2651
+ "loss": 0.6384,
2652
+ "step": 746
2653
+ },
2654
+ {
2655
+ "epoch": 0.26003824091778205,
2656
+ "grad_norm": 0.720916103725334,
2657
+ "learning_rate": 0.00019945556800309917,
2658
+ "loss": 0.5957,
2659
+ "step": 748
2660
+ },
2661
+ {
2662
+ "epoch": 0.2607335303320007,
2663
+ "grad_norm": 0.8550360126036364,
2664
+ "learning_rate": 0.00019944284502068275,
2665
+ "loss": 0.6454,
2666
+ "step": 750
2667
+ },
2668
+ {
2669
+ "epoch": 0.2607335303320007,
2670
+ "eval_loss": 0.6373963952064514,
2671
+ "eval_runtime": 728.7951,
2672
+ "eval_samples_per_second": 6.644,
2673
+ "eval_steps_per_second": 0.209,
2674
+ "step": 750
2675
+ },
2676
+ {
2677
+ "epoch": 0.26142881974621934,
2678
+ "grad_norm": 0.5467512313200382,
2679
+ "learning_rate": 0.0001994299755021461,
2680
+ "loss": 0.5781,
2681
+ "step": 752
2682
+ },
2683
+ {
2684
+ "epoch": 0.26212410916043805,
2685
+ "grad_norm": 0.41230384162224254,
2686
+ "learning_rate": 0.0001994169594664533,
2687
+ "loss": 0.6467,
2688
+ "step": 754
2689
+ },
2690
+ {
2691
+ "epoch": 0.2628193985746567,
2692
+ "grad_norm": 0.5635895964904281,
2693
+ "learning_rate": 0.00019940379693278448,
2694
+ "loss": 0.6182,
2695
+ "step": 756
2696
+ },
2697
+ {
2698
+ "epoch": 0.2635146879888754,
2699
+ "grad_norm": 0.6456168984233462,
2700
+ "learning_rate": 0.0001993904879205355,
2701
+ "loss": 0.6086,
2702
+ "step": 758
2703
+ },
2704
+ {
2705
+ "epoch": 0.26420997740309404,
2706
+ "grad_norm": 0.5060399335833471,
2707
+ "learning_rate": 0.00019937703244931815,
2708
+ "loss": 0.6407,
2709
+ "step": 760
2710
+ },
2711
+ {
2712
+ "epoch": 0.2649052668173127,
2713
+ "grad_norm": 0.5033297464646784,
2714
+ "learning_rate": 0.00019936343053896004,
2715
+ "loss": 0.6157,
2716
+ "step": 762
2717
+ },
2718
+ {
2719
+ "epoch": 0.2656005562315314,
2720
+ "grad_norm": 0.3911665523252255,
2721
+ "learning_rate": 0.00019934968220950458,
2722
+ "loss": 0.6227,
2723
+ "step": 764
2724
+ },
2725
+ {
2726
+ "epoch": 0.26629584564575004,
2727
+ "grad_norm": 1.7130937466256138,
2728
+ "learning_rate": 0.00019933578748121086,
2729
+ "loss": 0.5994,
2730
+ "step": 766
2731
+ },
2732
+ {
2733
+ "epoch": 0.2669911350599687,
2734
+ "grad_norm": 0.5747338886603724,
2735
+ "learning_rate": 0.00019932174637455382,
2736
+ "loss": 0.6238,
2737
+ "step": 768
2738
+ },
2739
+ {
2740
+ "epoch": 0.2676864244741874,
2741
+ "grad_norm": 0.6226324357252033,
2742
+ "learning_rate": 0.00019930755891022398,
2743
+ "loss": 0.6255,
2744
+ "step": 770
2745
+ },
2746
+ {
2747
+ "epoch": 0.26838171388840604,
2748
+ "grad_norm": 0.5282488794229044,
2749
+ "learning_rate": 0.00019929322510912756,
2750
+ "loss": 0.5808,
2751
+ "step": 772
2752
+ },
2753
+ {
2754
+ "epoch": 0.26907700330262474,
2755
+ "grad_norm": 0.5596904577167024,
2756
+ "learning_rate": 0.0001992787449923865,
2757
+ "loss": 0.5361,
2758
+ "step": 774
2759
+ },
2760
+ {
2761
+ "epoch": 0.2697722927168434,
2762
+ "grad_norm": 1.0477422618379753,
2763
+ "learning_rate": 0.00019926411858133824,
2764
+ "loss": 0.54,
2765
+ "step": 776
2766
+ },
2767
+ {
2768
+ "epoch": 0.27046758213106203,
2769
+ "grad_norm": 1.1314704495635823,
2770
+ "learning_rate": 0.00019924934589753582,
2771
+ "loss": 0.5869,
2772
+ "step": 778
2773
+ },
2774
+ {
2775
+ "epoch": 0.27116287154528074,
2776
+ "grad_norm": 0.5426548544381972,
2777
+ "learning_rate": 0.00019923442696274794,
2778
+ "loss": 0.5951,
2779
+ "step": 780
2780
+ },
2781
+ {
2782
+ "epoch": 0.2718581609594994,
2783
+ "grad_norm": 0.8480025293040686,
2784
+ "learning_rate": 0.00019921936179895862,
2785
+ "loss": 0.6003,
2786
+ "step": 782
2787
+ },
2788
+ {
2789
+ "epoch": 0.2725534503737181,
2790
+ "grad_norm": 0.5729452380906337,
2791
+ "learning_rate": 0.0001992041504283675,
2792
+ "loss": 0.6526,
2793
+ "step": 784
2794
+ },
2795
+ {
2796
+ "epoch": 0.27324873978793673,
2797
+ "grad_norm": 0.7362085286496177,
2798
+ "learning_rate": 0.00019918879287338957,
2799
+ "loss": 0.6776,
2800
+ "step": 786
2801
+ },
2802
+ {
2803
+ "epoch": 0.2739440292021554,
2804
+ "grad_norm": 0.7440059372391256,
2805
+ "learning_rate": 0.00019917328915665535,
2806
+ "loss": 0.6486,
2807
+ "step": 788
2808
+ },
2809
+ {
2810
+ "epoch": 0.2746393186163741,
2811
+ "grad_norm": 0.6110842206790659,
2812
+ "learning_rate": 0.0001991576393010106,
2813
+ "loss": 0.6226,
2814
+ "step": 790
2815
+ },
2816
+ {
2817
+ "epoch": 0.27533460803059273,
2818
+ "grad_norm": 0.8002336898560896,
2819
+ "learning_rate": 0.00019914184332951655,
2820
+ "loss": 0.6215,
2821
+ "step": 792
2822
+ },
2823
+ {
2824
+ "epoch": 0.2760298974448114,
2825
+ "grad_norm": 0.6761736356192558,
2826
+ "learning_rate": 0.00019912590126544964,
2827
+ "loss": 0.5988,
2828
+ "step": 794
2829
+ },
2830
+ {
2831
+ "epoch": 0.2767251868590301,
2832
+ "grad_norm": 1.1005016630123619,
2833
+ "learning_rate": 0.00019910981313230172,
2834
+ "loss": 0.6213,
2835
+ "step": 796
2836
+ },
2837
+ {
2838
+ "epoch": 0.27742047627324873,
2839
+ "grad_norm": 0.6513452605734942,
2840
+ "learning_rate": 0.00019909357895377973,
2841
+ "loss": 0.682,
2842
+ "step": 798
2843
+ },
2844
+ {
2845
+ "epoch": 0.27811576568746743,
2846
+ "grad_norm": 0.8536492677008404,
2847
+ "learning_rate": 0.0001990771987538059,
2848
+ "loss": 0.707,
2849
+ "step": 800
2850
+ },
2851
+ {
2852
+ "epoch": 0.2788110551016861,
2853
+ "grad_norm": 0.3362293750538688,
2854
+ "learning_rate": 0.00019906067255651765,
2855
+ "loss": 0.7108,
2856
+ "step": 802
2857
+ },
2858
+ {
2859
+ "epoch": 0.2795063445159047,
2860
+ "grad_norm": 0.4792899916449288,
2861
+ "learning_rate": 0.0001990440003862675,
2862
+ "loss": 0.6055,
2863
+ "step": 804
2864
+ },
2865
+ {
2866
+ "epoch": 0.28020163393012343,
2867
+ "grad_norm": 1.272937677307704,
2868
+ "learning_rate": 0.00019902718226762304,
2869
+ "loss": 0.6816,
2870
+ "step": 806
2871
+ },
2872
+ {
2873
+ "epoch": 0.2808969233443421,
2874
+ "grad_norm": 0.30646692495778655,
2875
+ "learning_rate": 0.00019901021822536704,
2876
+ "loss": 0.688,
2877
+ "step": 808
2878
+ },
2879
+ {
2880
+ "epoch": 0.2815922127585608,
2881
+ "grad_norm": 0.3035733218665055,
2882
+ "learning_rate": 0.00019899310828449713,
2883
+ "loss": 0.6746,
2884
+ "step": 810
2885
+ },
2886
+ {
2887
+ "epoch": 0.2822875021727794,
2888
+ "grad_norm": 0.2763276026826459,
2889
+ "learning_rate": 0.00019897585247022613,
2890
+ "loss": 0.6632,
2891
+ "step": 812
2892
+ },
2893
+ {
2894
+ "epoch": 0.2829827915869981,
2895
+ "grad_norm": 1.173118337900773,
2896
+ "learning_rate": 0.00019895845080798166,
2897
+ "loss": 0.6306,
2898
+ "step": 814
2899
+ },
2900
+ {
2901
+ "epoch": 0.2836780810012168,
2902
+ "grad_norm": 0.4335278252167635,
2903
+ "learning_rate": 0.0001989409033234063,
2904
+ "loss": 0.6147,
2905
+ "step": 816
2906
+ },
2907
+ {
2908
+ "epoch": 0.2843733704154354,
2909
+ "grad_norm": 0.3556694503062785,
2910
+ "learning_rate": 0.00019892321004235755,
2911
+ "loss": 0.5771,
2912
+ "step": 818
2913
+ },
2914
+ {
2915
+ "epoch": 0.28506865982965407,
2916
+ "grad_norm": 0.9083240381897224,
2917
+ "learning_rate": 0.00019890537099090768,
2918
+ "loss": 0.5729,
2919
+ "step": 820
2920
+ },
2921
+ {
2922
+ "epoch": 0.2857639492438728,
2923
+ "grad_norm": 0.5433346815141633,
2924
+ "learning_rate": 0.00019888738619534385,
2925
+ "loss": 0.5554,
2926
+ "step": 822
2927
+ },
2928
+ {
2929
+ "epoch": 0.2864592386580914,
2930
+ "grad_norm": 0.5244705156138804,
2931
+ "learning_rate": 0.0001988692556821679,
2932
+ "loss": 0.6525,
2933
+ "step": 824
2934
+ },
2935
+ {
2936
+ "epoch": 0.2871545280723101,
2937
+ "grad_norm": 0.7580866792170871,
2938
+ "learning_rate": 0.00019885097947809648,
2939
+ "loss": 0.6512,
2940
+ "step": 826
2941
+ },
2942
+ {
2943
+ "epoch": 0.28784981748652877,
2944
+ "grad_norm": 0.7034554538404351,
2945
+ "learning_rate": 0.00019883255761006082,
2946
+ "loss": 0.5414,
2947
+ "step": 828
2948
+ },
2949
+ {
2950
+ "epoch": 0.2885451069007474,
2951
+ "grad_norm": 0.6915575597289163,
2952
+ "learning_rate": 0.00019881399010520688,
2953
+ "loss": 0.6036,
2954
+ "step": 830
2955
+ },
2956
+ {
2957
+ "epoch": 0.2892403963149661,
2958
+ "grad_norm": 0.6895372001781882,
2959
+ "learning_rate": 0.00019879527699089524,
2960
+ "loss": 0.5894,
2961
+ "step": 832
2962
+ },
2963
+ {
2964
+ "epoch": 0.28993568572918477,
2965
+ "grad_norm": 0.7762412863407715,
2966
+ "learning_rate": 0.00019877641829470094,
2967
+ "loss": 0.7115,
2968
+ "step": 834
2969
+ },
2970
+ {
2971
+ "epoch": 0.29063097514340347,
2972
+ "grad_norm": 0.5761220663639801,
2973
+ "learning_rate": 0.00019875741404441367,
2974
+ "loss": 0.6108,
2975
+ "step": 836
2976
+ },
2977
+ {
2978
+ "epoch": 0.2913262645576221,
2979
+ "grad_norm": 0.47176943357070505,
2980
+ "learning_rate": 0.00019873826426803755,
2981
+ "loss": 0.634,
2982
+ "step": 838
2983
+ },
2984
+ {
2985
+ "epoch": 0.29202155397184076,
2986
+ "grad_norm": 0.5985873148196751,
2987
+ "learning_rate": 0.00019871896899379107,
2988
+ "loss": 0.6528,
2989
+ "step": 840
2990
+ },
2991
+ {
2992
+ "epoch": 0.29271684338605947,
2993
+ "grad_norm": 0.39514741111190665,
2994
+ "learning_rate": 0.00019869952825010727,
2995
+ "loss": 0.6034,
2996
+ "step": 842
2997
+ },
2998
+ {
2999
+ "epoch": 0.2934121328002781,
3000
+ "grad_norm": 0.29787585594263405,
3001
+ "learning_rate": 0.00019867994206563343,
3002
+ "loss": 0.6063,
3003
+ "step": 844
3004
+ },
3005
+ {
3006
+ "epoch": 0.29410742221449676,
3007
+ "grad_norm": 0.303320759302155,
3008
+ "learning_rate": 0.00019866021046923118,
3009
+ "loss": 0.6343,
3010
+ "step": 846
3011
+ },
3012
+ {
3013
+ "epoch": 0.29480271162871546,
3014
+ "grad_norm": 0.33135450527244925,
3015
+ "learning_rate": 0.00019864033348997645,
3016
+ "loss": 0.6421,
3017
+ "step": 848
3018
+ },
3019
+ {
3020
+ "epoch": 0.2954980010429341,
3021
+ "grad_norm": 0.553668190192523,
3022
+ "learning_rate": 0.0001986203111571594,
3023
+ "loss": 0.6503,
3024
+ "step": 850
3025
+ },
3026
+ {
3027
+ "epoch": 0.2961932904571528,
3028
+ "grad_norm": 0.31948016623126946,
3029
+ "learning_rate": 0.00019860014350028438,
3030
+ "loss": 0.6259,
3031
+ "step": 852
3032
+ },
3033
+ {
3034
+ "epoch": 0.29688857987137146,
3035
+ "grad_norm": 0.5325237443938606,
3036
+ "learning_rate": 0.0001985798305490698,
3037
+ "loss": 0.6207,
3038
+ "step": 854
3039
+ },
3040
+ {
3041
+ "epoch": 0.2975838692855901,
3042
+ "grad_norm": 0.5093186589927414,
3043
+ "learning_rate": 0.00019855937233344831,
3044
+ "loss": 0.5397,
3045
+ "step": 856
3046
+ },
3047
+ {
3048
+ "epoch": 0.2982791586998088,
3049
+ "grad_norm": 0.5220573948537062,
3050
+ "learning_rate": 0.00019853876888356652,
3051
+ "loss": 0.6237,
3052
+ "step": 858
3053
+ },
3054
+ {
3055
+ "epoch": 0.29897444811402746,
3056
+ "grad_norm": 0.9861332892020862,
3057
+ "learning_rate": 0.00019851802022978506,
3058
+ "loss": 0.689,
3059
+ "step": 860
3060
+ },
3061
+ {
3062
+ "epoch": 0.2996697375282461,
3063
+ "grad_norm": 0.464669721879274,
3064
+ "learning_rate": 0.00019849712640267861,
3065
+ "loss": 0.522,
3066
+ "step": 862
3067
+ },
3068
+ {
3069
+ "epoch": 0.3003650269424648,
3070
+ "grad_norm": 0.9223435358714303,
3071
+ "learning_rate": 0.00019847608743303567,
3072
+ "loss": 0.7491,
3073
+ "step": 864
3074
+ },
3075
+ {
3076
+ "epoch": 0.30106031635668346,
3077
+ "grad_norm": 0.5058775377593727,
3078
+ "learning_rate": 0.00019845490335185866,
3079
+ "loss": 0.562,
3080
+ "step": 866
3081
+ },
3082
+ {
3083
+ "epoch": 0.30175560577090216,
3084
+ "grad_norm": 0.5517767811356443,
3085
+ "learning_rate": 0.00019843357419036382,
3086
+ "loss": 0.6162,
3087
+ "step": 868
3088
+ },
3089
+ {
3090
+ "epoch": 0.3024508951851208,
3091
+ "grad_norm": 0.49255497197537723,
3092
+ "learning_rate": 0.00019841209997998127,
3093
+ "loss": 0.6803,
3094
+ "step": 870
3095
+ },
3096
+ {
3097
+ "epoch": 0.30314618459933945,
3098
+ "grad_norm": 0.41553745561512617,
3099
+ "learning_rate": 0.0001983904807523547,
3100
+ "loss": 0.6415,
3101
+ "step": 872
3102
+ },
3103
+ {
3104
+ "epoch": 0.30384147401355815,
3105
+ "grad_norm": 0.49551628457734653,
3106
+ "learning_rate": 0.00019836871653934162,
3107
+ "loss": 0.6176,
3108
+ "step": 874
3109
+ },
3110
+ {
3111
+ "epoch": 0.3045367634277768,
3112
+ "grad_norm": 0.7489091107060393,
3113
+ "learning_rate": 0.00019834680737301313,
3114
+ "loss": 0.6337,
3115
+ "step": 876
3116
+ },
3117
+ {
3118
+ "epoch": 0.3052320528419955,
3119
+ "grad_norm": 0.32312869533576805,
3120
+ "learning_rate": 0.00019832475328565398,
3121
+ "loss": 0.6135,
3122
+ "step": 878
3123
+ },
3124
+ {
3125
+ "epoch": 0.30592734225621415,
3126
+ "grad_norm": 0.304002075332943,
3127
+ "learning_rate": 0.00019830255430976242,
3128
+ "loss": 0.5533,
3129
+ "step": 880
3130
+ },
3131
+ {
3132
+ "epoch": 0.3066226316704328,
3133
+ "grad_norm": 0.4137621036041215,
3134
+ "learning_rate": 0.00019828021047805022,
3135
+ "loss": 0.573,
3136
+ "step": 882
3137
+ },
3138
+ {
3139
+ "epoch": 0.3073179210846515,
3140
+ "grad_norm": 0.7506870255042438,
3141
+ "learning_rate": 0.00019825772182344262,
3142
+ "loss": 0.6971,
3143
+ "step": 884
3144
+ },
3145
+ {
3146
+ "epoch": 0.30801321049887015,
3147
+ "grad_norm": 0.7069489041589112,
3148
+ "learning_rate": 0.00019823508837907828,
3149
+ "loss": 0.5848,
3150
+ "step": 886
3151
+ },
3152
+ {
3153
+ "epoch": 0.3087084999130888,
3154
+ "grad_norm": 0.49383355752727304,
3155
+ "learning_rate": 0.00019821231017830914,
3156
+ "loss": 0.6349,
3157
+ "step": 888
3158
+ },
3159
+ {
3160
+ "epoch": 0.3094037893273075,
3161
+ "grad_norm": 0.7893505446859834,
3162
+ "learning_rate": 0.0001981893872547005,
3163
+ "loss": 0.6335,
3164
+ "step": 890
3165
+ },
3166
+ {
3167
+ "epoch": 0.31009907874152615,
3168
+ "grad_norm": 1.0962653113728835,
3169
+ "learning_rate": 0.00019816631964203097,
3170
+ "loss": 0.6438,
3171
+ "step": 892
3172
+ },
3173
+ {
3174
+ "epoch": 0.31079436815574485,
3175
+ "grad_norm": 0.40606329821748216,
3176
+ "learning_rate": 0.0001981431073742923,
3177
+ "loss": 0.557,
3178
+ "step": 894
3179
+ },
3180
+ {
3181
+ "epoch": 0.3114896575699635,
3182
+ "grad_norm": 0.8061837126172193,
3183
+ "learning_rate": 0.00019811975048568943,
3184
+ "loss": 0.6334,
3185
+ "step": 896
3186
+ },
3187
+ {
3188
+ "epoch": 0.31218494698418214,
3189
+ "grad_norm": 0.7808955990860935,
3190
+ "learning_rate": 0.00019809624901064038,
3191
+ "loss": 0.5775,
3192
+ "step": 898
3193
+ },
3194
+ {
3195
+ "epoch": 0.31288023639840085,
3196
+ "grad_norm": 0.5527203146534614,
3197
+ "learning_rate": 0.00019807260298377626,
3198
+ "loss": 0.5934,
3199
+ "step": 900
3200
+ },
3201
+ {
3202
+ "epoch": 0.31288023639840085,
3203
+ "eval_loss": 0.666339099407196,
3204
+ "eval_runtime": 759.5196,
3205
+ "eval_samples_per_second": 6.375,
3206
+ "eval_steps_per_second": 0.2,
3207
+ "step": 900
3208
+ },
3209
+ {
3210
+ "epoch": 0.3135755258126195,
3211
+ "grad_norm": 1.151650071753606,
3212
+ "learning_rate": 0.00019804881243994118,
3213
+ "loss": 0.6459,
3214
+ "step": 902
3215
+ },
3216
+ {
3217
+ "epoch": 0.3142708152268382,
3218
+ "grad_norm": 0.37537177441864283,
3219
+ "learning_rate": 0.00019802487741419218,
3220
+ "loss": 0.5537,
3221
+ "step": 904
3222
+ },
3223
+ {
3224
+ "epoch": 0.31496610464105684,
3225
+ "grad_norm": 0.39806583735978385,
3226
+ "learning_rate": 0.00019800079794179927,
3227
+ "loss": 0.5765,
3228
+ "step": 906
3229
+ },
3230
+ {
3231
+ "epoch": 0.3156613940552755,
3232
+ "grad_norm": 0.9252532303995283,
3233
+ "learning_rate": 0.00019797657405824524,
3234
+ "loss": 0.6581,
3235
+ "step": 908
3236
+ },
3237
+ {
3238
+ "epoch": 0.3163566834694942,
3239
+ "grad_norm": 0.4242008643262632,
3240
+ "learning_rate": 0.00019795220579922572,
3241
+ "loss": 0.663,
3242
+ "step": 910
3243
+ },
3244
+ {
3245
+ "epoch": 0.31705197288371284,
3246
+ "grad_norm": 0.5557863138791925,
3247
+ "learning_rate": 0.00019792769320064904,
3248
+ "loss": 0.6492,
3249
+ "step": 912
3250
+ },
3251
+ {
3252
+ "epoch": 0.3177472622979315,
3253
+ "grad_norm": 0.5743017982975046,
3254
+ "learning_rate": 0.0001979030362986363,
3255
+ "loss": 0.6425,
3256
+ "step": 914
3257
+ },
3258
+ {
3259
+ "epoch": 0.3184425517121502,
3260
+ "grad_norm": 0.39667228882787314,
3261
+ "learning_rate": 0.0001978782351295212,
3262
+ "loss": 0.5658,
3263
+ "step": 916
3264
+ },
3265
+ {
3266
+ "epoch": 0.31913784112636884,
3267
+ "grad_norm": 1.2742981139875873,
3268
+ "learning_rate": 0.00019785328972985,
3269
+ "loss": 0.6042,
3270
+ "step": 918
3271
+ },
3272
+ {
3273
+ "epoch": 0.31983313054058754,
3274
+ "grad_norm": 0.7520790754771111,
3275
+ "learning_rate": 0.00019782820013638158,
3276
+ "loss": 0.6248,
3277
+ "step": 920
3278
+ },
3279
+ {
3280
+ "epoch": 0.3205284199548062,
3281
+ "grad_norm": 1.1777266516894538,
3282
+ "learning_rate": 0.0001978029663860872,
3283
+ "loss": 0.6394,
3284
+ "step": 922
3285
+ },
3286
+ {
3287
+ "epoch": 0.32122370936902483,
3288
+ "grad_norm": 0.5383416828808074,
3289
+ "learning_rate": 0.00019777758851615058,
3290
+ "loss": 0.6357,
3291
+ "step": 924
3292
+ },
3293
+ {
3294
+ "epoch": 0.32191899878324354,
3295
+ "grad_norm": 0.5351088818608489,
3296
+ "learning_rate": 0.00019775206656396787,
3297
+ "loss": 0.6111,
3298
+ "step": 926
3299
+ },
3300
+ {
3301
+ "epoch": 0.3226142881974622,
3302
+ "grad_norm": 0.7776255734128178,
3303
+ "learning_rate": 0.00019772640056714744,
3304
+ "loss": 0.5778,
3305
+ "step": 928
3306
+ },
3307
+ {
3308
+ "epoch": 0.3233095776116809,
3309
+ "grad_norm": 0.5049904332607067,
3310
+ "learning_rate": 0.00019770059056351,
3311
+ "loss": 0.5978,
3312
+ "step": 930
3313
+ },
3314
+ {
3315
+ "epoch": 0.32400486702589953,
3316
+ "grad_norm": 0.6894813643690206,
3317
+ "learning_rate": 0.00019767463659108841,
3318
+ "loss": 0.6727,
3319
+ "step": 932
3320
+ },
3321
+ {
3322
+ "epoch": 0.3247001564401182,
3323
+ "grad_norm": 0.6230252249989028,
3324
+ "learning_rate": 0.00019764853868812772,
3325
+ "loss": 0.5911,
3326
+ "step": 934
3327
+ },
3328
+ {
3329
+ "epoch": 0.3253954458543369,
3330
+ "grad_norm": 0.6699617199619087,
3331
+ "learning_rate": 0.00019762229689308499,
3332
+ "loss": 0.6694,
3333
+ "step": 936
3334
+ },
3335
+ {
3336
+ "epoch": 0.32609073526855553,
3337
+ "grad_norm": 0.9762605521595761,
3338
+ "learning_rate": 0.00019759591124462943,
3339
+ "loss": 0.7053,
3340
+ "step": 938
3341
+ },
3342
+ {
3343
+ "epoch": 0.3267860246827742,
3344
+ "grad_norm": 0.5216728233794251,
3345
+ "learning_rate": 0.0001975693817816422,
3346
+ "loss": 0.6958,
3347
+ "step": 940
3348
+ },
3349
+ {
3350
+ "epoch": 0.3274813140969929,
3351
+ "grad_norm": 0.5943791708445256,
3352
+ "learning_rate": 0.00019754270854321625,
3353
+ "loss": 0.6342,
3354
+ "step": 942
3355
+ },
3356
+ {
3357
+ "epoch": 0.32817660351121153,
3358
+ "grad_norm": 0.5341014737913188,
3359
+ "learning_rate": 0.00019751589156865663,
3360
+ "loss": 0.6272,
3361
+ "step": 944
3362
+ },
3363
+ {
3364
+ "epoch": 0.32887189292543023,
3365
+ "grad_norm": 0.8411647140863245,
3366
+ "learning_rate": 0.00019748893089747995,
3367
+ "loss": 0.6041,
3368
+ "step": 946
3369
+ },
3370
+ {
3371
+ "epoch": 0.3295671823396489,
3372
+ "grad_norm": 1.072323043427063,
3373
+ "learning_rate": 0.00019746182656941473,
3374
+ "loss": 0.7152,
3375
+ "step": 948
3376
+ },
3377
+ {
3378
+ "epoch": 0.3302624717538675,
3379
+ "grad_norm": 0.6497829380326366,
3380
+ "learning_rate": 0.00019743457862440115,
3381
+ "loss": 0.6176,
3382
+ "step": 950
3383
+ },
3384
+ {
3385
+ "epoch": 0.33095776116808623,
3386
+ "grad_norm": 0.28736093186011447,
3387
+ "learning_rate": 0.00019740718710259096,
3388
+ "loss": 0.6453,
3389
+ "step": 952
3390
+ },
3391
+ {
3392
+ "epoch": 0.3316530505823049,
3393
+ "grad_norm": 0.27868233108109625,
3394
+ "learning_rate": 0.00019737965204434757,
3395
+ "loss": 0.6051,
3396
+ "step": 954
3397
+ },
3398
+ {
3399
+ "epoch": 0.3323483399965236,
3400
+ "grad_norm": 0.40709235855818693,
3401
+ "learning_rate": 0.00019735197349024576,
3402
+ "loss": 0.6255,
3403
+ "step": 956
3404
+ },
3405
+ {
3406
+ "epoch": 0.3330436294107422,
3407
+ "grad_norm": 0.8385677925045294,
3408
+ "learning_rate": 0.00019732415148107199,
3409
+ "loss": 0.6455,
3410
+ "step": 958
3411
+ },
3412
+ {
3413
+ "epoch": 0.3337389188249609,
3414
+ "grad_norm": 0.5642576200414804,
3415
+ "learning_rate": 0.00019729618605782384,
3416
+ "loss": 0.6971,
3417
+ "step": 960
3418
+ },
3419
+ {
3420
+ "epoch": 0.3344342082391796,
3421
+ "grad_norm": 0.7034648545079693,
3422
+ "learning_rate": 0.00019726807726171039,
3423
+ "loss": 0.6177,
3424
+ "step": 962
3425
+ },
3426
+ {
3427
+ "epoch": 0.3351294976533982,
3428
+ "grad_norm": 1.9840633930320113,
3429
+ "learning_rate": 0.000197239825134152,
3430
+ "loss": 0.6776,
3431
+ "step": 964
3432
+ },
3433
+ {
3434
+ "epoch": 0.33582478706761687,
3435
+ "grad_norm": 1.0091982574836484,
3436
+ "learning_rate": 0.00019721142971678015,
3437
+ "loss": 0.6893,
3438
+ "step": 966
3439
+ },
3440
+ {
3441
+ "epoch": 0.3365200764818356,
3442
+ "grad_norm": 0.9742560258590767,
3443
+ "learning_rate": 0.00019718289105143753,
3444
+ "loss": 0.744,
3445
+ "step": 968
3446
+ },
3447
+ {
3448
+ "epoch": 0.3372153658960542,
3449
+ "grad_norm": 0.6897018399345455,
3450
+ "learning_rate": 0.00019715420918017793,
3451
+ "loss": 0.678,
3452
+ "step": 970
3453
+ },
3454
+ {
3455
+ "epoch": 0.3379106553102729,
3456
+ "grad_norm": 0.29102959771453246,
3457
+ "learning_rate": 0.00019712538414526606,
3458
+ "loss": 0.6663,
3459
+ "step": 972
3460
+ },
3461
+ {
3462
+ "epoch": 0.33860594472449157,
3463
+ "grad_norm": 0.7337107483377766,
3464
+ "learning_rate": 0.0001970964159891777,
3465
+ "loss": 0.663,
3466
+ "step": 974
3467
+ },
3468
+ {
3469
+ "epoch": 0.3393012341387102,
3470
+ "grad_norm": 0.5817704647699353,
3471
+ "learning_rate": 0.00019706730475459953,
3472
+ "loss": 0.6398,
3473
+ "step": 976
3474
+ },
3475
+ {
3476
+ "epoch": 0.3399965235529289,
3477
+ "grad_norm": 0.28703428796704483,
3478
+ "learning_rate": 0.00019703805048442897,
3479
+ "loss": 0.5906,
3480
+ "step": 978
3481
+ },
3482
+ {
3483
+ "epoch": 0.34069181296714757,
3484
+ "grad_norm": 0.41383789019772477,
3485
+ "learning_rate": 0.0001970086532217743,
3486
+ "loss": 0.6709,
3487
+ "step": 980
3488
+ },
3489
+ {
3490
+ "epoch": 0.34138710238136627,
3491
+ "grad_norm": 0.812487649001141,
3492
+ "learning_rate": 0.00019697911300995443,
3493
+ "loss": 0.6191,
3494
+ "step": 982
3495
+ },
3496
+ {
3497
+ "epoch": 0.3420823917955849,
3498
+ "grad_norm": 0.798027200072012,
3499
+ "learning_rate": 0.00019694942989249907,
3500
+ "loss": 0.6608,
3501
+ "step": 984
3502
+ },
3503
+ {
3504
+ "epoch": 0.34277768120980356,
3505
+ "grad_norm": 0.44029385955900757,
3506
+ "learning_rate": 0.00019691960391314837,
3507
+ "loss": 0.647,
3508
+ "step": 986
3509
+ },
3510
+ {
3511
+ "epoch": 0.34347297062402227,
3512
+ "grad_norm": 0.3824484030698272,
3513
+ "learning_rate": 0.00019688963511585295,
3514
+ "loss": 0.6378,
3515
+ "step": 988
3516
+ },
3517
+ {
3518
+ "epoch": 0.3441682600382409,
3519
+ "grad_norm": 0.4121768227084979,
3520
+ "learning_rate": 0.0001968595235447741,
3521
+ "loss": 0.5908,
3522
+ "step": 990
3523
+ },
3524
+ {
3525
+ "epoch": 0.34486354945245956,
3526
+ "grad_norm": 0.5154693781246049,
3527
+ "learning_rate": 0.0001968292692442833,
3528
+ "loss": 0.5632,
3529
+ "step": 992
3530
+ },
3531
+ {
3532
+ "epoch": 0.34555883886667826,
3533
+ "grad_norm": 0.3895510966829308,
3534
+ "learning_rate": 0.0001967988722589624,
3535
+ "loss": 0.4674,
3536
+ "step": 994
3537
+ },
3538
+ {
3539
+ "epoch": 0.3462541282808969,
3540
+ "grad_norm": 0.507531875733667,
3541
+ "learning_rate": 0.00019676833263360352,
3542
+ "loss": 0.5581,
3543
+ "step": 996
3544
+ },
3545
+ {
3546
+ "epoch": 0.3469494176951156,
3547
+ "grad_norm": 0.5476523355263471,
3548
+ "learning_rate": 0.00019673765041320907,
3549
+ "loss": 0.7421,
3550
+ "step": 998
3551
+ },
3552
+ {
3553
+ "epoch": 0.34764470710933426,
3554
+ "grad_norm": 0.8417172933340035,
3555
+ "learning_rate": 0.00019670682564299136,
3556
+ "loss": 0.6774,
3557
+ "step": 1000
3558
+ },
3559
+ {
3560
+ "epoch": 0.3483399965235529,
3561
+ "grad_norm": 0.6717112677412562,
3562
+ "learning_rate": 0.00019667585836837299,
3563
+ "loss": 0.6515,
3564
+ "step": 1002
3565
+ },
3566
+ {
3567
+ "epoch": 0.3490352859377716,
3568
+ "grad_norm": 0.7599904388695796,
3569
+ "learning_rate": 0.0001966447486349864,
3570
+ "loss": 0.5679,
3571
+ "step": 1004
3572
+ },
3573
+ {
3574
+ "epoch": 0.34973057535199026,
3575
+ "grad_norm": 0.44186748583335306,
3576
+ "learning_rate": 0.000196613496488674,
3577
+ "loss": 0.6067,
3578
+ "step": 1006
3579
+ },
3580
+ {
3581
+ "epoch": 0.35042586476620896,
3582
+ "grad_norm": 0.4287165077907837,
3583
+ "learning_rate": 0.00019658210197548805,
3584
+ "loss": 0.5706,
3585
+ "step": 1008
3586
+ },
3587
+ {
3588
+ "epoch": 0.3511211541804276,
3589
+ "grad_norm": 0.6051489125727973,
3590
+ "learning_rate": 0.0001965505651416906,
3591
+ "loss": 0.6178,
3592
+ "step": 1010
3593
+ },
3594
+ {
3595
+ "epoch": 0.35181644359464626,
3596
+ "grad_norm": 0.5003034918118222,
3597
+ "learning_rate": 0.00019651888603375346,
3598
+ "loss": 0.6,
3599
+ "step": 1012
3600
+ },
3601
+ {
3602
+ "epoch": 0.35251173300886496,
3603
+ "grad_norm": 0.6774513066433009,
3604
+ "learning_rate": 0.00019648706469835804,
3605
+ "loss": 0.6248,
3606
+ "step": 1014
3607
+ },
3608
+ {
3609
+ "epoch": 0.3532070224230836,
3610
+ "grad_norm": 0.5666799893616385,
3611
+ "learning_rate": 0.0001964551011823953,
3612
+ "loss": 0.6256,
3613
+ "step": 1016
3614
+ },
3615
+ {
3616
+ "epoch": 0.35390231183730225,
3617
+ "grad_norm": 0.6185519925235329,
3618
+ "learning_rate": 0.00019642299553296582,
3619
+ "loss": 0.5836,
3620
+ "step": 1018
3621
+ },
3622
+ {
3623
+ "epoch": 0.35459760125152096,
3624
+ "grad_norm": 0.5020407783730059,
3625
+ "learning_rate": 0.0001963907477973795,
3626
+ "loss": 0.5148,
3627
+ "step": 1020
3628
+ },
3629
+ {
3630
+ "epoch": 0.3552928906657396,
3631
+ "grad_norm": 0.966855697270511,
3632
+ "learning_rate": 0.00019635835802315574,
3633
+ "loss": 0.6335,
3634
+ "step": 1022
3635
+ },
3636
+ {
3637
+ "epoch": 0.3559881800799583,
3638
+ "grad_norm": 0.8705091175729548,
3639
+ "learning_rate": 0.00019632582625802317,
3640
+ "loss": 0.6313,
3641
+ "step": 1024
3642
+ },
3643
+ {
3644
+ "epoch": 0.35668346949417695,
3645
+ "grad_norm": 0.6028556619755229,
3646
+ "learning_rate": 0.00019629315254991964,
3647
+ "loss": 0.6483,
3648
+ "step": 1026
3649
+ },
3650
+ {
3651
+ "epoch": 0.3573787589083956,
3652
+ "grad_norm": 0.6075038119620636,
3653
+ "learning_rate": 0.00019626033694699214,
3654
+ "loss": 0.6271,
3655
+ "step": 1028
3656
+ },
3657
+ {
3658
+ "epoch": 0.3580740483226143,
3659
+ "grad_norm": 0.7923956541669288,
3660
+ "learning_rate": 0.00019622737949759694,
3661
+ "loss": 0.6338,
3662
+ "step": 1030
3663
+ },
3664
+ {
3665
+ "epoch": 0.35876933773683295,
3666
+ "grad_norm": 1.2067992138100796,
3667
+ "learning_rate": 0.00019619428025029905,
3668
+ "loss": 0.6308,
3669
+ "step": 1032
3670
+ },
3671
+ {
3672
+ "epoch": 0.35946462715105165,
3673
+ "grad_norm": 0.5446351671940789,
3674
+ "learning_rate": 0.00019616103925387265,
3675
+ "loss": 0.6475,
3676
+ "step": 1034
3677
+ },
3678
+ {
3679
+ "epoch": 0.3601599165652703,
3680
+ "grad_norm": 0.8842474031361561,
3681
+ "learning_rate": 0.0001961276565573007,
3682
+ "loss": 0.6654,
3683
+ "step": 1036
3684
+ },
3685
+ {
3686
+ "epoch": 0.36085520597948895,
3687
+ "grad_norm": 1.294693850012853,
3688
+ "learning_rate": 0.00019609413220977496,
3689
+ "loss": 0.6929,
3690
+ "step": 1038
3691
+ },
3692
+ {
3693
+ "epoch": 0.36155049539370765,
3694
+ "grad_norm": 0.7435682846586636,
3695
+ "learning_rate": 0.00019606046626069595,
3696
+ "loss": 0.6791,
3697
+ "step": 1040
3698
+ },
3699
+ {
3700
+ "epoch": 0.3622457848079263,
3701
+ "grad_norm": 0.45762946459115417,
3702
+ "learning_rate": 0.0001960266587596729,
3703
+ "loss": 0.5769,
3704
+ "step": 1042
3705
+ },
3706
+ {
3707
+ "epoch": 0.36294107422214494,
3708
+ "grad_norm": 0.5614638042598611,
3709
+ "learning_rate": 0.00019599270975652352,
3710
+ "loss": 0.6047,
3711
+ "step": 1044
3712
+ },
3713
+ {
3714
+ "epoch": 0.36363636363636365,
3715
+ "grad_norm": 1.6171161059961894,
3716
+ "learning_rate": 0.0001959586193012741,
3717
+ "loss": 0.6932,
3718
+ "step": 1046
3719
+ },
3720
+ {
3721
+ "epoch": 0.3643316530505823,
3722
+ "grad_norm": 1.5847051714441287,
3723
+ "learning_rate": 0.00019592438744415932,
3724
+ "loss": 0.5908,
3725
+ "step": 1048
3726
+ },
3727
+ {
3728
+ "epoch": 0.365026942464801,
3729
+ "grad_norm": 0.4282438415059217,
3730
+ "learning_rate": 0.00019589001423562233,
3731
+ "loss": 0.6749,
3732
+ "step": 1050
3733
+ },
3734
+ {
3735
+ "epoch": 0.365026942464801,
3736
+ "eval_loss": 0.6080955266952515,
3737
+ "eval_runtime": 710.9824,
3738
+ "eval_samples_per_second": 6.81,
3739
+ "eval_steps_per_second": 0.214,
3740
+ "step": 1050
3741
+ },
3742
+ {
3743
+ "epoch": 0.36572223187901964,
3744
+ "grad_norm": 1.0612232842206784,
3745
+ "learning_rate": 0.00019585549972631446,
3746
+ "loss": 0.5669,
3747
+ "step": 1052
3748
+ },
3749
+ {
3750
+ "epoch": 0.3664175212932383,
3751
+ "grad_norm": 2.6994673924740358,
3752
+ "learning_rate": 0.0001958208439670953,
3753
+ "loss": 0.6455,
3754
+ "step": 1054
3755
+ },
3756
+ {
3757
+ "epoch": 0.367112810707457,
3758
+ "grad_norm": 2.022628249772274,
3759
+ "learning_rate": 0.0001957860470090326,
3760
+ "loss": 0.6395,
3761
+ "step": 1056
3762
+ },
3763
+ {
3764
+ "epoch": 0.36780810012167564,
3765
+ "grad_norm": 0.607126211578616,
3766
+ "learning_rate": 0.00019575110890340214,
3767
+ "loss": 0.593,
3768
+ "step": 1058
3769
+ },
3770
+ {
3771
+ "epoch": 0.36850338953589434,
3772
+ "grad_norm": 1.026539890410463,
3773
+ "learning_rate": 0.00019571602970168775,
3774
+ "loss": 0.5939,
3775
+ "step": 1060
3776
+ },
3777
+ {
3778
+ "epoch": 0.369198678950113,
3779
+ "grad_norm": 0.6663599873173821,
3780
+ "learning_rate": 0.00019568080945558104,
3781
+ "loss": 0.6415,
3782
+ "step": 1062
3783
+ },
3784
+ {
3785
+ "epoch": 0.36989396836433164,
3786
+ "grad_norm": 0.5967439670789174,
3787
+ "learning_rate": 0.00019564544821698167,
3788
+ "loss": 0.6348,
3789
+ "step": 1064
3790
+ },
3791
+ {
3792
+ "epoch": 0.37058925777855034,
3793
+ "grad_norm": 0.6468802992284401,
3794
+ "learning_rate": 0.00019560994603799682,
3795
+ "loss": 0.5991,
3796
+ "step": 1066
3797
+ },
3798
+ {
3799
+ "epoch": 0.371284547192769,
3800
+ "grad_norm": 1.2251498168873143,
3801
+ "learning_rate": 0.00019557430297094158,
3802
+ "loss": 0.6001,
3803
+ "step": 1068
3804
+ },
3805
+ {
3806
+ "epoch": 0.37197983660698763,
3807
+ "grad_norm": 0.4681978143920913,
3808
+ "learning_rate": 0.00019553851906833853,
3809
+ "loss": 0.5664,
3810
+ "step": 1070
3811
+ },
3812
+ {
3813
+ "epoch": 0.37267512602120634,
3814
+ "grad_norm": 0.7538053079694034,
3815
+ "learning_rate": 0.00019550259438291782,
3816
+ "loss": 0.5531,
3817
+ "step": 1072
3818
+ },
3819
+ {
3820
+ "epoch": 0.373370415435425,
3821
+ "grad_norm": 0.4626868039226879,
3822
+ "learning_rate": 0.00019546652896761696,
3823
+ "loss": 0.6268,
3824
+ "step": 1074
3825
+ },
3826
+ {
3827
+ "epoch": 0.3740657048496437,
3828
+ "grad_norm": 0.8012577314135656,
3829
+ "learning_rate": 0.00019543032287558097,
3830
+ "loss": 0.7819,
3831
+ "step": 1076
3832
+ },
3833
+ {
3834
+ "epoch": 0.37476099426386233,
3835
+ "grad_norm": 0.7004564506452116,
3836
+ "learning_rate": 0.0001953939761601621,
3837
+ "loss": 0.6505,
3838
+ "step": 1078
3839
+ },
3840
+ {
3841
+ "epoch": 0.375456283678081,
3842
+ "grad_norm": 1.352602706017517,
3843
+ "learning_rate": 0.0001953574888749198,
3844
+ "loss": 0.558,
3845
+ "step": 1080
3846
+ }
3847
+ ],
3848
+ "logging_steps": 2,
3849
+ "max_steps": 5752,
3850
+ "num_input_tokens_seen": 0,
3851
+ "num_train_epochs": 2,
3852
+ "save_steps": 90,
3853
+ "stateful_callbacks": {
3854
+ "TrainerControl": {
3855
+ "args": {
3856
+ "should_epoch_stop": false,
3857
+ "should_evaluate": false,
3858
+ "should_log": false,
3859
+ "should_save": true,
3860
+ "should_training_stop": false
3861
+ },
3862
+ "attributes": {}
3863
+ }
3864
+ },
3865
+ "total_flos": 4303181501693952.0,
3866
+ "train_batch_size": 4,
3867
+ "trial_name": null,
3868
+ "trial_params": null
3869
+ }
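The trainer_state.json diff above is a standard 🤗 Transformers trainer state: a `log_history` array of per-step records (`epoch`, `grad_norm`, `learning_rate`, `loss`, `step`) with `eval_loss` records interleaved every 150 steps, plus run metadata (`logging_steps`, `max_steps`, `save_steps`, ...). A minimal sketch for eyeballing the run from any of these files; the checkpoint path is an assumption, so point it at whichever `checkpoint-*/trainer_state.json` you downloaded:

```python
import json
import matplotlib.pyplot as plt

# Path is an assumption -- use any checkpoint-*/trainer_state.json from this repo.
with open("checkpoint-1080/trainer_state.json") as f:
    state = json.load(f)

# Train entries carry "loss"; eval entries carry "eval_loss" (every 150 steps here).
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), label="train loss", alpha=0.6)
plt.plot(*zip(*evals), label="eval loss", marker="o")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")
```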
checkpoint-1080/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b72add547acd6c009f3a3770072d0cdf7de7e797c7597084319db874a556e470
3
+ size 6904
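`training_args.bin` is stored as a Git LFS pointer (the real object is 6,904 bytes); it holds the pickled `TrainingArguments` the Trainer saved next to the checkpoint. A hedged sketch for inspecting it, assuming a `transformers` version compatible with the pickle is installed:

```python
import torch

# weights_only=False is required: the file is a pickled TrainingArguments
# object, not a tensor state dict.
args = torch.load("checkpoint-1080/training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```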
checkpoint-1080/zero_to_fp32.py ADDED
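`zero_to_fp32.py` is the stock DeepSpeed helper that the Trainer copies into every checkpoint directory; it consolidates the sharded ZeRO states under `global_step1080/` into fp32 weights that no longer require DeepSpeed to load. Besides the CLI shown in the script header below, the same logic is importable from DeepSpeed itself; a sketch assuming a recent `deepspeed` release where this function takes an output directory and a `safe_serialization` flag:

```python
from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# Reads checkpoint-1080/global_step1080/* and writes consolidated fp32 weights;
# the exact signature (output dir vs. output file) depends on the DeepSpeed version.
convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-1080", "checkpoint-1080/fp32", safe_serialization=True
)
```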
@@ -0,0 +1,760 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
+ # DeepSpeed data structures it has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict()
42
+ param_shapes: dict()
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict()
46
+ frozen_param_fragments: dict()
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol:
256
+ #
257
+ # XXX: document this
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
353
+
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
435
+
436
+
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
442
+ # param, re-consolidating each param, while dealing with padding if any
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = fp32_flat_groups[0].numel() * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
+ # out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
+ exclude_frozen_parameters):
492
+ state_dict = OrderedDict()
493
+
494
+ # buffers
495
+ buffers = zero_model_states[0].buffers
496
+ state_dict.update(buffers)
497
+ if debug:
498
+ print(f"added {len(buffers)} buffers")
499
+
500
+ if not exclude_frozen_parameters:
501
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
+
503
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
+
505
+ # recover shared parameters
506
+ for pair in zero_model_states[0].shared_params:
507
+ if pair[1] in state_dict:
508
+ state_dict[pair[0]] = state_dict[pair[1]]
509
+
510
+ return state_dict
511
+
512
+
513
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
514
+ """
515
+ Convert state_dict of GatheredTensor to torch tensor
516
+ """
517
+ torch_state_dict = {}
518
+ converted_tensors = {}
519
+ for name, tensor in state_dict.items():
520
+ tensor_id = id(tensor)
521
+ if tensor_id in converted_tensors: # shared tensors
522
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
+ torch_state_dict[name] = shared_tensor
524
+ else:
525
+ converted_tensors[tensor_id] = name
526
+ if return_empty_tensor:
527
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
+ else:
529
+ torch_state_dict[name] = tensor.contiguous()
530
+ return torch_state_dict
531
+
532
+
533
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
+ tag=None,
535
+ exclude_frozen_parameters=False,
536
+ lazy_mode=False):
537
+ """
538
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
+ via a model hub.
541
+
542
+ Args:
543
+ - ``checkpoint_dir``: path to the desired checkpoint folder
544
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
545
+ - ``exclude_frozen_parameters``: exclude frozen parameters
546
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
547
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``
548
+
549
+ Returns:
550
+ - pytorch ``state_dict``
551
+
552
+ A typical usage might be ::
553
+
554
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
+ # do the training and checkpoint saving
556
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
+ model = model.cpu() # move to cpu
558
+ model.load_state_dict(state_dict)
559
+ # submit to model hub or save the model to share with others
560
+
561
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
+ application. i.e. you will need to re-initialize the deepspeed engine, since
563
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
+
565
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
+
567
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
569
+ the checkpoint. Or you can load state_dict in lazy mode ::
570
+
571
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
+ for name, lazy_tensor in state_dict.items():
574
+ tensor = lazy_tensor.contiguous() # to cpu
575
+ print(name, tensor)
576
+ # del tensor to release memory if it is no longer in use
577
+ """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
599
+ output_dir,
600
+ max_shard_size="5GB",
601
+ safe_serialization=False,
602
+ tag=None,
603
+ exclude_frozen_parameters=False):
604
+ """
605
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
606
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
607
+
608
+ Args:
609
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
610
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
611
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
612
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
613
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag is read from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
614
+ - ``exclude_frozen_parameters``: exclude frozen parameters
615
+ """
616
+
617
+ # Dependency pre-check
618
+ if safe_serialization:
619
+ try:
620
+ from safetensors.torch import save_file
621
+ except ImportError:
622
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
623
+ raise
624
+ if max_shard_size is not None:
625
+ try:
626
+ from huggingface_hub import split_torch_state_dict_into_shards
627
+ except ImportError:
628
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
629
+ raise
630
+
631
+ # Convert zero checkpoint to state_dict
632
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
633
+ tag,
634
+ exclude_frozen_parameters,
635
+ lazy_mode=True)
636
+
637
+ # Shard the model if it is too big.
638
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
639
+ if max_shard_size is not None:
640
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
641
+ # a memory-efficient approach to sharding
642
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
643
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
644
+ filename_pattern=filename_pattern,
645
+ max_shard_size=max_shard_size)
646
+ else:
647
+ from collections import namedtuple
648
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
649
+ state_dict_split = StateDictSplit(is_sharded=False,
650
+ filename_to_tensors={weights_name: list(state_dict.keys())})
651
+
652
+ # Save the model by shard
653
+ os.makedirs(output_dir, exist_ok=True)
654
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
655
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
656
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
657
+ shard_state_dict = to_torch_tensor(shard_state_dict)
658
+ output_path = os.path.join(output_dir, shard_file)
659
+ if safe_serialization:
660
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
661
+ else:
662
+ torch.save(shard_state_dict, output_path)
663
+ # release the memory of current shard
664
+ for tensor_name in list(shard_state_dict.keys()):
665
+ del state_dict[tensor_name]
666
+ del shard_state_dict[tensor_name]
667
+ del shard_state_dict
668
+ gc.collect()
669
+
670
+ # Save index if sharded
671
+ if state_dict_split.is_sharded:
672
+ index = {
673
+ "metadata": state_dict_split.metadata,
674
+ "weight_map": state_dict_split.tensor_to_filename,
675
+ }
676
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
677
+ save_index_file = os.path.join(output_dir, save_index_file)
678
+ with open(save_index_file, "w", encoding="utf-8") as f:
679
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
680
+ f.write(content)
681
+
682
+
683
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
684
+ """
685
+ 1. Put the provided model to cpu
686
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
687
+ 3. Load it into the provided model
688
+
689
+ Args:
690
+ - ``model``: the model object to update
691
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
692
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag is read from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
693
+
694
+ Returns:
695
+ - ``model``: the modified model
696
+
697
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
698
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
699
+ conveniently placed for you in the checkpoint folder.
700
+
701
+ A typical usage might be ::
702
+
703
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
704
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
705
+ # submit to model hub or save the model to share with others
706
+
707
+ Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
708
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
709
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
710
+
711
+ """
712
+ logger.info(f"Extracting fp32 weights")
713
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
714
+
715
+ logger.info(f"Overwriting model with fp32 weights")
716
+ model = model.cpu()
717
+ model.load_state_dict(state_dict, strict=False)
718
+
719
+ return model
720
+
721
+
722
+ if __name__ == "__main__":
723
+ parser = argparse.ArgumentParser()
724
+ parser.add_argument("checkpoint_dir",
725
+ type=str,
726
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
727
+ parser.add_argument("output_dir",
728
+ type=str,
729
+ help="directory for the pytorch fp32 state_dict output files "
730
+ "(e.g. path/checkpoint-12-output/)")
731
+ parser.add_argument(
732
+ "--max_shard_size",
733
+ type=str,
734
+ default="5GB",
735
+ help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be "
736
+ "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
737
+ "We default to 5GB so that models can easily run on free-tier Google Colab instances "
738
+ "without CPU OOM issues.")
739
+ parser.add_argument(
740
+ "--safe_serialization",
741
+ default=False,
742
+ action='store_true',
743
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
744
+ parser.add_argument("-t",
745
+ "--tag",
746
+ type=str,
747
+ default=None,
748
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
749
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
750
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
751
+ args = parser.parse_args()
752
+
753
+ debug = args.debug
754
+
755
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
756
+ args.output_dir,
757
+ max_shard_size=args.max_shard_size,
758
+ safe_serialization=args.safe_serialization,
759
+ tag=args.tag,
760
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
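Because this script is copied into every checkpoint directory, the ZeRO shards can be consolidated from the command line as its header shows (`python zero_to_fp32.py . output_dir/ --safe_serialization`) or programmatically. A sketch of the programmatic route, assuming DeepSpeed is installed, the LFS shards have been pulled, and the script is importable from the working directory:

```python
# Sketch: merge the bf16 ZeRO optimizer shards under checkpoint-1080/ into
# consolidated fp32 weights, written as sharded safetensors files. The output
# directory name is hypothetical.
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-1080",       # contains the `latest` file and global_step1080/
    "checkpoint-1080-fp32",  # hypothetical output directory
    max_shard_size="5GB",
    safe_serialization=True,
)
```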
checkpoint-1170/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: ../../initial_seq_model
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.14.0
checkpoint-1170/adapter_config.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "../../initial_seq_model",
5
+ "bias": "none",
6
+ "eva_config": null,
7
+ "exclude_modules": null,
8
+ "fan_in_fan_out": false,
9
+ "inference_mode": true,
10
+ "init_lora_weights": true,
11
+ "layer_replication": null,
12
+ "layers_pattern": null,
13
+ "layers_to_transform": null,
14
+ "loftq_config": {},
15
+ "lora_alpha": 16,
16
+ "lora_bias": false,
17
+ "lora_dropout": 0.1,
18
+ "megatron_config": null,
19
+ "megatron_core": "megatron.core",
20
+ "modules_to_save": null,
21
+ "peft_type": "LORA",
22
+ "r": 8,
23
+ "rank_pattern": {},
24
+ "revision": null,
25
+ "target_modules": [
26
+ "k_proj",
27
+ "o_proj",
28
+ "gate_proj",
29
+ "down_proj",
30
+ "score",
31
+ "v_proj",
32
+ "up_proj",
33
+ "q_proj"
34
+ ],
35
+ "task_type": "CAUSAL_LM",
36
+ "use_dora": false,
37
+ "use_rslora": false
38
+ }
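This is a standard PEFT LoRA configuration: rank 8, alpha 16, dropout 0.1, applied to the attention and MLP projections plus the `score` head. A hedged sketch for attaching the adapter, assuming the relative path `../../initial_seq_model` from the config resolves to the base weights and PEFT 0.14.0 is installed; since `score` is targeted, the base may actually carry a classification head, in which case swap in the matching Auto class:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Sketch: load the frozen base model, then attach the checkpoint-1170 adapter.
# "../../initial_seq_model" is taken verbatim from adapter_config.json; point
# it at wherever the base weights actually live.
base = AutoModelForCausalLM.from_pretrained("../../initial_seq_model")
model = PeftModel.from_pretrained(base, "checkpoint-1170")
model = model.merge_and_unload()  # optional: bake the LoRA deltas into the base
```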
checkpoint-1170/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f7ffe39e0ca812790b65ff4bb4686fd88a07d6d677f35b88c34262a305d21c9
3
+ size 42068368
checkpoint-1170/global_step1170/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4daac094337ae497e0fa8af400fcea891e5f4d048732d70b00163d19d0bc9c6a
3
+ size 63016432
checkpoint-1170/global_step1170/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c04abc9d76fb0fc4f77d202128abf735aae52dd6dd2c04b6c8a8e93fc1b6147
3
+ size 63016432
checkpoint-1170/global_step1170/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15944f6e6c99b74bf11da6a491434dea709b59ac30a0c72ace49374259734d52
3
+ size 63016432
checkpoint-1170/global_step1170/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:725bd60c615905b60e48d120087b89cf39a0985ff7e1eef8eaa9362f68fab0b0
3
+ size 63016432
checkpoint-1170/global_step1170/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9d3fe5fd138dae44b7da5f1483ce0c12a53c23120edcf8ccd30fe5a1050eb48
3
+ size 442088
checkpoint-1170/global_step1170/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0ede948e416acf2e75e2ef8295a456c4b0123d6837df161d349f52573d44abe
3
+ size 442088
checkpoint-1170/global_step1170/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41f602b8bbfa703dc3346c29edd99c3e2961d99e2c8eef3d6df676cd087b2be5
3
+ size 442088
checkpoint-1170/global_step1170/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e4f87069b00e941e780ff271446bb381e2563f76d3f13a2510005008e0c87c1
3
+ size 442088
checkpoint-1170/latest ADDED
@@ -0,0 +1 @@
1
+ global_step1170
checkpoint-1170/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1f4bcac92757b2f2e7ccee1b14e7605e7b1f5a406a59ca1861585945e1a9635
3
+ size 14960
checkpoint-1170/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43cb819b07eea538b35e77881adad4cc16f05b013019b2b36adbbab184c126ab
3
+ size 14960
checkpoint-1170/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36e89c559ec8d731912e7a7126e6f105bd04edb6560947dce420df7f042ade84
3
+ size 14960
checkpoint-1170/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6cff8aa495938f6284a7c27d560e94ac101e3a7aefba9f530e0e796f083f388f
3
+ size 14960
checkpoint-1170/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a7f1216465e13b10fa0131544f5f3e82daa222fb53d9aa6a40e735f590850cd
3
+ size 1064
checkpoint-1170/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<BOS_TOKEN>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|END_OF_TURN_TOKEN|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<PAD>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<UNK>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
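The special-token map pairs with the tokenizer files that follow (note `tokenizer.json` is a ~20 MB LFS object and must be fetched first). A quick sanity-check sketch:

```python
from transformers import AutoTokenizer

# Sketch: the checkpoint ships its own tokenizer, so it loads directly; the
# printed tokens should match special_tokens_map.json above.
tok = AutoTokenizer.from_pretrained("checkpoint-1170")
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# expected: <BOS_TOKEN> <|END_OF_TURN_TOKEN|> <PAD> <UNK>
```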
checkpoint-1170/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28c8b8038fcb2756e349a51832a56634423c579a869f39642526327aa56b2989
3
+ size 20125189
checkpoint-1170/tokenizer_config.json ADDED
@@ -0,0 +1,364 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": false,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<PAD>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<UNK>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "<CLS>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false,
28
+ "special": true
29
+ },
30
+ "3": {
31
+ "content": "<SEP>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false,
36
+ "special": true
37
+ },
38
+ "4": {
39
+ "content": "<MASK_TOKEN>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false,
44
+ "special": true
45
+ },
46
+ "5": {
47
+ "content": "<BOS_TOKEN>",
48
+ "lstrip": false,
49
+ "normalized": false,
50
+ "rstrip": false,
51
+ "single_word": false,
52
+ "special": true
53
+ },
54
+ "6": {
55
+ "content": "<EOS_TOKEN>",
56
+ "lstrip": false,
57
+ "normalized": false,
58
+ "rstrip": false,
59
+ "single_word": false,
60
+ "special": true
61
+ },
62
+ "7": {
63
+ "content": "<EOP_TOKEN>",
64
+ "lstrip": false,
65
+ "normalized": false,
66
+ "rstrip": false,
67
+ "single_word": false,
68
+ "special": true
69
+ },
70
+ "255000": {
71
+ "content": "<|START_OF_TURN_TOKEN|>",
72
+ "lstrip": false,
73
+ "normalized": false,
74
+ "rstrip": false,
75
+ "single_word": false,
76
+ "special": false
77
+ },
78
+ "255001": {
79
+ "content": "<|END_OF_TURN_TOKEN|>",
80
+ "lstrip": false,
81
+ "normalized": false,
82
+ "rstrip": false,
83
+ "single_word": false,
84
+ "special": true
85
+ },
86
+ "255002": {
87
+ "content": "<|YES_TOKEN|>",
88
+ "lstrip": false,
89
+ "normalized": false,
90
+ "rstrip": false,
91
+ "single_word": false,
92
+ "special": false
93
+ },
94
+ "255003": {
95
+ "content": "<|NO_TOKEN|>",
96
+ "lstrip": false,
97
+ "normalized": false,
98
+ "rstrip": false,
99
+ "single_word": false,
100
+ "special": false
101
+ },
102
+ "255004": {
103
+ "content": "<|GOOD_TOKEN|>",
104
+ "lstrip": false,
105
+ "normalized": false,
106
+ "rstrip": false,
107
+ "single_word": false,
108
+ "special": false
109
+ },
110
+ "255005": {
111
+ "content": "<|BAD_TOKEN|>",
112
+ "lstrip": false,
113
+ "normalized": false,
114
+ "rstrip": false,
115
+ "single_word": false,
116
+ "special": false
117
+ },
118
+ "255006": {
119
+ "content": "<|USER_TOKEN|>",
120
+ "lstrip": false,
121
+ "normalized": false,
122
+ "rstrip": false,
123
+ "single_word": false,
124
+ "special": false
125
+ },
126
+ "255007": {
127
+ "content": "<|CHATBOT_TOKEN|>",
128
+ "lstrip": false,
129
+ "normalized": false,
130
+ "rstrip": false,
131
+ "single_word": false,
132
+ "special": false
133
+ },
134
+ "255008": {
135
+ "content": "<|SYSTEM_TOKEN|>",
136
+ "lstrip": false,
137
+ "normalized": false,
138
+ "rstrip": false,
139
+ "single_word": false,
140
+ "special": false
141
+ },
142
+ "255009": {
143
+ "content": "<|USER_0_TOKEN|>",
144
+ "lstrip": false,
145
+ "normalized": false,
146
+ "rstrip": false,
147
+ "single_word": false,
148
+ "special": false
149
+ },
150
+ "255010": {
151
+ "content": "<|USER_1_TOKEN|>",
152
+ "lstrip": false,
153
+ "normalized": false,
154
+ "rstrip": false,
155
+ "single_word": false,
156
+ "special": false
157
+ },
158
+ "255011": {
159
+ "content": "<|USER_2_TOKEN|>",
160
+ "lstrip": false,
161
+ "normalized": false,
162
+ "rstrip": false,
163
+ "single_word": false,
164
+ "special": false
165
+ },
166
+ "255012": {
167
+ "content": "<|USER_3_TOKEN|>",
168
+ "lstrip": false,
169
+ "normalized": false,
170
+ "rstrip": false,
171
+ "single_word": false,
172
+ "special": false
173
+ },
174
+ "255013": {
175
+ "content": "<|USER_4_TOKEN|>",
176
+ "lstrip": false,
177
+ "normalized": false,
178
+ "rstrip": false,
179
+ "single_word": false,
180
+ "special": false
181
+ },
182
+ "255014": {
183
+ "content": "<|USER_5_TOKEN|>",
184
+ "lstrip": false,
185
+ "normalized": false,
186
+ "rstrip": false,
187
+ "single_word": false,
188
+ "special": false
189
+ },
190
+ "255015": {
191
+ "content": "<|USER_6_TOKEN|>",
192
+ "lstrip": false,
193
+ "normalized": false,
194
+ "rstrip": false,
195
+ "single_word": false,
196
+ "special": false
197
+ },
198
+ "255016": {
199
+ "content": "<|USER_7_TOKEN|>",
200
+ "lstrip": false,
201
+ "normalized": false,
202
+ "rstrip": false,
203
+ "single_word": false,
204
+ "special": false
205
+ },
206
+ "255017": {
207
+ "content": "<|USER_8_TOKEN|>",
208
+ "lstrip": false,
209
+ "normalized": false,
210
+ "rstrip": false,
211
+ "single_word": false,
212
+ "special": false
213
+ },
214
+ "255018": {
215
+ "content": "<|USER_9_TOKEN|>",
216
+ "lstrip": false,
217
+ "normalized": false,
218
+ "rstrip": false,
219
+ "single_word": false,
220
+ "special": false
221
+ },
222
+ "255019": {
223
+ "content": "<|START_THINKING|>",
224
+ "lstrip": false,
225
+ "normalized": false,
226
+ "rstrip": false,
227
+ "single_word": false,
228
+ "special": false
229
+ },
230
+ "255020": {
231
+ "content": "<|END_THINKING|>",
232
+ "lstrip": false,
233
+ "normalized": false,
234
+ "rstrip": false,
235
+ "single_word": false,
236
+ "special": false
237
+ },
238
+ "255021": {
239
+ "content": "<|START_RESPONSE|>",
240
+ "lstrip": false,
241
+ "normalized": false,
242
+ "rstrip": false,
243
+ "single_word": false,
244
+ "special": true
245
+ },
246
+ "255022": {
247
+ "content": "<|END_RESPONSE|>",
248
+ "lstrip": false,
249
+ "normalized": false,
250
+ "rstrip": false,
251
+ "single_word": false,
252
+ "special": true
253
+ },
254
+ "255023": {
255
+ "content": "<|START_ACTION|>",
256
+ "lstrip": false,
257
+ "normalized": false,
258
+ "rstrip": false,
259
+ "single_word": false,
260
+ "special": false
261
+ },
262
+ "255024": {
263
+ "content": "<|END_ACTION|>",
264
+ "lstrip": false,
265
+ "normalized": false,
266
+ "rstrip": false,
267
+ "single_word": false,
268
+ "special": false
269
+ },
270
+ "255025": {
271
+ "content": "<|START_TOOL_RESULT|>",
272
+ "lstrip": false,
273
+ "normalized": false,
274
+ "rstrip": false,
275
+ "single_word": false,
276
+ "special": false
277
+ },
278
+ "255026": {
279
+ "content": "<|END_TOOL_RESULT|>",
280
+ "lstrip": false,
281
+ "normalized": false,
282
+ "rstrip": false,
283
+ "single_word": false,
284
+ "special": false
285
+ },
286
+ "255027": {
287
+ "content": "<|EXTRA_8_TOKEN|>",
288
+ "lstrip": false,
289
+ "normalized": false,
290
+ "rstrip": false,
291
+ "single_word": false,
292
+ "special": false
293
+ },
294
+ "255028": {
295
+ "content": "<|NEW_FILE|>",
296
+ "lstrip": false,
297
+ "normalized": false,
298
+ "rstrip": false,
299
+ "single_word": false,
300
+ "special": true
301
+ },
302
+ "255029": {
303
+ "content": "<|BEGINNING_OF_PREFIX_FIM_TOKEN|>",
304
+ "lstrip": false,
305
+ "normalized": false,
306
+ "rstrip": false,
307
+ "single_word": false,
308
+ "special": false
309
+ },
310
+ "255030": {
311
+ "content": "<|BEGINNING_OF_MIDDLE_FIM_TOKEN|>",
312
+ "lstrip": false,
313
+ "normalized": false,
314
+ "rstrip": false,
315
+ "single_word": false,
316
+ "special": false
317
+ },
318
+ "255031": {
319
+ "content": "<|BEGINNING_OF_SUFFIX_FIM_TOKEN|>",
320
+ "lstrip": false,
321
+ "normalized": false,
322
+ "rstrip": false,
323
+ "single_word": false,
324
+ "special": false
325
+ },
326
+ "255032": {
327
+ "content": "<|END_OF_MIDDLE_FIM_TOKEN|>",
328
+ "lstrip": false,
329
+ "normalized": false,
330
+ "rstrip": false,
331
+ "single_word": false,
332
+ "special": false
333
+ }
334
+ },
335
+ "bos_token": "<BOS_TOKEN>",
336
+ "chat_template": [
337
+ {
338
+ "name": "default",
339
+ "template": "{% if documents %}\n{% set tools = [] %}\n{%- macro document_turn(documents) -%}\n{# format documents into chat turn #}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[\n {\"tool_call_id\": \"0\", \"tool_name\": \"direct-injected-document\", \"parameters\": {}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n {\n \"tool_call_id\": \"0\",\n \"results\": {\n{% for doc in documents %}\n \"{{ loop.index0 }}\": {{doc|tojson}}{% if not loop.last %},\n {% endif %}\n{% endfor %}\n\n },\n \"is_error\": null\n }\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %}\n{%- macro tool_call_id_to_int(messages, tool_call_id) %}\n{%- set counter = namespace(value=0) %}\n{%- set tool_call_id_seen = namespace(value=false) %}\n{%- for msg in messages %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls %}\n {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%}\n {{ counter.value }}\n {%- set tool_call_id_seen.value = true %}\n {%- endif %}\n {%- set counter.value = counter.value + 1 %}\n {%- endfor %}\n {%- endif %}\n{%- endfor %}\n{%- endmacro %}\n{%- macro format_tool_message(messages, tool_msg) -%}\n{# format tool message #}\n {\n \"tool_call_id\": \"{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}\",\n \"results\": {\n \"0\": {{ tool_msg.content|tojson }}\n },\n \"is_error\": null\n }\n{%- endmacro -%}\n{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %}\n{%- set tool_idx = namespace(value=0) %}\n{%- set tool_ids_seen = namespace(value=[]) %}\n{%- set sent_documents = namespace(value=false) %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes.\n\nYour information cutoff date is June 2024.\n\nYou have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages.\n{% if tools or documents %}\n\nYou have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests.\n\n## Tool Use\nThink about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first.\n\n0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. 
When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed.\n NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools.\n\nThen carry out your plan by repeatedly executing the following steps.\n1. Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing \"tool_name\" and \"parameters\" fields.\n When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. When you finish, close it out with <|END_ACTION|>.\n2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results.\n Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its \"tool_call_id\".\n3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded.\n NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user.\n\nYou can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user.\n\n4. Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>.\n{% if enable_citations %}\n\n## Grounding\nImportantly, note that \"Reflection\" and \"Response\" above can be grounded.\nGrounding means you associate pieces of texts (called \"spans\") with those specific tool results that support them (called \"sources\"). And you use a pair of tags \"<co>\" and \"</co>\" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as \"{tool_call_id}:[{list of result indices}]\", before they are joined together by \",\". E.g., \"<co>span</co: 0:[1,2],1:[0]>\" means that \"span\" is supported by result 1 and 2 from \"tool_call_id=0\" as well as result 0 from \"tool_call_id=1\".\n{% endif %}\n\n## Available Tools\nHere is the list of tools that you have available to you.\nYou can ONLY use the tools listed here. 
When a tool is not listed below, it is NOT available and you should NEVER attempt to use it.\nEach tool is represented as a JSON object with fields like \"name\", \"description\", \"parameters\" (per JSON Schema), and optionally, \"responses\" (per JSON Schema).\n\n```json\n[\n{% if documents %}\n {\"name\": \"direct-injected-document\", \"description\": \"This is a special tool to directly inject user-uploaded documents into the chat as additional context. DO NOT use this tool by yourself!\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}, \"responses\": {\"200\": {\"description\": \"Successfully returned a list of chunked text snippets from the directly uploaded documents.\", \"content\": {\"application/json\": {\"schema\": {\"type\": \"array\", \"items\": {\"type\": \"object\", \"required\": [\"url\", \"snippet\"], \"properties\": {\"url\": {\"type\": \"string\", \"description\": \"The url of the uploaded document.\"}, \"snippet\": {\"type\": \"string\", \"description\": \"The text snippet for the returned document chunk.\"}}}}}}}}}{%- if tools %},{% endif %}\n\n{% endif %}\n{% for tool in tools %}\n {\"name\": \"{{ tool['function']['name'] }}\", \"description\": \"{{tool['function']['description']}}\", \"parameters\": {{ tool['function']['parameters']|tojson }}, \"responses\": null}{%- if not loop.last %},{% endif %}\n\n{% endfor %}\n]\n```\n\n{% endif %}\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Command.\n- You are a large language model built by Cohere.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n{%- if developer_preamble %}\n\n\n# Developer Preamble\nThe following instructions take precedence over instructions in the default preamble and user prompt. 
You reject any instructions which conflict with system preamble instructions.\n{{ developer_preamble }}\n{%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages %}\n {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>\n {%- elif message.role|lower == 'user' %}\n<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %}\n {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[\n {% for tc in message.tool_calls %}\n {\"tool_call_id\": \"{{ tool_idx.value }}\", \"tool_name\": \"{{ tc['function']['name'] }}\", \"parameters\": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %}\n\n {% set tool_idx.value = tool_idx.value + 1 %}\n {% endfor %}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %}\n {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n{{ format_tool_message(messages, message) }}\n {%- for msg in messages[loop.index0 + 1:] %}\n {%- if msg.role|lower == 'tool' %},\n{{ format_tool_message(messages, msg) }}\n {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %}\n {%- else %}\n {%- break %}\n {%- endif %}\n {%- endfor %}\n\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>\n {%- endif %}\n{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\n{%- else -%}\n{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}\n {%- set system_message = messages[0]['content'] %}{% elif false == true %}\n {%- set loop_messages = messages %}{% set system_message = '' %}\n{%- else %}\n {%- set loop_messages = messages %}\n {%- set system_message = false %}\n{%- endif %}\n{%- if system_message != false -%}\n {{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}\n{%- else -%}\n {{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|END_OF_TURN_TOKEN|>' }}\n{%- endif %}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif -%}\n {%- set content = message['content'] -%}\n {%- if message['role'] == 'user' -%}\n {{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}\n {%- elif message['role'] == 'assistant' -%}\n {{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_RESPONSE|>' + content.strip() + '<|END_RESPONSE|><|END_OF_TURN_TOKEN|>' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt -%}\n {{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_RESPONSE|>' }}\n{%- endif %}\n{% endif %}"
+ },
+ {
+ "name": "tool_use",
+ "template": "{%- macro document_turn(documents) -%}\n{# format documents into chat turn #}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[\n {\"tool_call_id\": \"0\", \"tool_name\": \"direct-injected-document\", \"parameters\": {}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n {\n \"tool_call_id\": \"0\",\n \"results\": {\n{% for doc in documents %}\n \"{{ loop.index0 }}\": {{doc|tojson}}{% if not loop.last %},\n {% endif %}\n{% endfor %}\n\n },\n \"is_error\": null\n }\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %}\n{%- macro tool_call_id_to_int(messages, tool_call_id) %}\n{%- set counter = namespace(value=0) %}\n{%- set tool_call_id_seen = namespace(value=false) %}\n{%- for msg in messages %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls %}\n {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%}\n {{ counter.value }}\n {%- set tool_call_id_seen.value = true %}\n {%- endif %}\n {%- set counter.value = counter.value + 1 %}\n {%- endfor %}\n {%- endif %}\n{%- endfor %}\n{%- endmacro %}\n{%- macro format_tool_message(messages, tool_msg) -%}\n{# format tool message #}\n {\n \"tool_call_id\": \"{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}\",\n \"results\": {\n \"0\": {{ tool_msg.content|tojson }}\n },\n \"is_error\": null\n }\n{%- endmacro -%}\n{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %}\n{%- set tool_idx = namespace(value=0) %}\n{%- set tool_ids_seen = namespace(value=[]) %}\n{%- set sent_documents = namespace(value=false) %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes.\n\nYour information cutoff date is June 2024.\n\nYou have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages.\n{% if tools or documents %}\n\nYou have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests.\n\n## Tool Use\nThink about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first.\n\n0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed.\n NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools.\n\nThen carry out your plan by repeatedly executing the following steps.\n1. 
Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing \"tool_name\" and \"parameters\" fields.\n When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. When you finish, close it out with <|END_ACTION|>.\n2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results.\n Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its \"tool_call_id\".\n3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded.\n NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user.\n\nYou can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user.\n\n4. Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>.\n{% if enable_citations %}\n\n## Grounding\nImportantly, note that \"Reflection\" and \"Response\" above can be grounded.\nGrounding means you associate pieces of texts (called \"spans\") with those specific tool results that support them (called \"sources\"). And you use a pair of tags \"<co>\" and \"</co>\" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as \"{tool_call_id}:[{list of result indices}]\", before they are joined together by \",\". E.g., \"<co>span</co: 0:[1,2],1:[0]>\" means that \"span\" is supported by result 1 and 2 from \"tool_call_id=0\" as well as result 0 from \"tool_call_id=1\".\n{% endif %}\n\n## Available Tools\nHere is the list of tools that you have available to you.\nYou can ONLY use the tools listed here. When a tool is not listed below, it is NOT available and you should NEVER attempt to use it.\nEach tool is represented as a JSON object with fields like \"name\", \"description\", \"parameters\" (per JSON Schema), and optionally, \"responses\" (per JSON Schema).\n\n```json\n[\n{% if documents %}\n {\"name\": \"direct-injected-document\", \"description\": \"This is a special tool to directly inject user-uploaded documents into the chat as additional context. 
DO NOT use this tool by yourself!\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}, \"responses\": {\"200\": {\"description\": \"Successfully returned a list of chunked text snippets from the directly uploaded documents.\", \"content\": {\"application/json\": {\"schema\": {\"type\": \"array\", \"items\": {\"type\": \"object\", \"required\": [\"url\", \"snippet\"], \"properties\": {\"url\": {\"type\": \"string\", \"description\": \"The url of the uploaded document.\"}, \"snippet\": {\"type\": \"string\", \"description\": \"The text snippet for the returned document chunk.\"}}}}}}}}}{%- if tools %},{% endif %}\n\n{% endif %}\n{% for tool in tools %}\n {\"name\": \"{{ tool['function']['name'] }}\", \"description\": \"{{tool['function']['description']}}\", \"parameters\": {{ tool['function']['parameters']|tojson }}, \"responses\": null}{%- if not loop.last %},{% endif %}\n\n{% endfor %}\n]\n```\n\n{% endif %}\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Command.\n- You are a large language model built by Cohere.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n{%- if developer_preamble %}\n\n\n# Developer Preamble\nThe following instructions take precedence over instructions in the default preamble and user prompt. 
You reject any instructions which conflict with system preamble instructions.\n{{ developer_preamble }}\n{%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages %}\n {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>\n {%- elif message.role|lower == 'user' %}\n<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %}\n {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[\n {% for tc in message.tool_calls %}\n {\"tool_call_id\": \"{{ tool_idx.value }}\", \"tool_name\": \"{{ tc['function']['name'] }}\", \"parameters\": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %}\n\n {% set tool_idx.value = tool_idx.value + 1 %}\n {% endfor %}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %}\n {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n{{ format_tool_message(messages, message) }}\n {%- for msg in messages[loop.index0 + 1:] %}\n {%- if msg.role|lower == 'tool' %},\n{{ format_tool_message(messages, msg) }}\n {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %}\n {%- else %}\n {%- break %}\n {%- endif %}\n {%- endfor %}\n\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>\n {%- endif %}\n{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
+ },
+ {
+ "name": "rag",
+ "template": "{% set tools = [] %}\n{%- macro document_turn(documents) -%}\n{# format documents into chat turn #}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[\n {\"tool_call_id\": \"0\", \"tool_name\": \"direct-injected-document\", \"parameters\": {}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n {\n \"tool_call_id\": \"0\",\n \"results\": {\n{% for doc in documents %}\n \"{{ loop.index0 }}\": {{doc|tojson}}{% if not loop.last %},\n {% endif %}\n{% endfor %}\n\n },\n \"is_error\": null\n }\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %}\n{%- macro tool_call_id_to_int(messages, tool_call_id) %}\n{%- set counter = namespace(value=0) %}\n{%- set tool_call_id_seen = namespace(value=false) %}\n{%- for msg in messages %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls %}\n {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%}\n {{ counter.value }}\n {%- set tool_call_id_seen.value = true %}\n {%- endif %}\n {%- set counter.value = counter.value + 1 %}\n {%- endfor %}\n {%- endif %}\n{%- endfor %}\n{%- endmacro %}\n{%- macro format_tool_message(messages, tool_msg) -%}\n{# format tool message #}\n {\n \"tool_call_id\": \"{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}\",\n \"results\": {\n \"0\": {{ tool_msg.content|tojson }}\n },\n \"is_error\": null\n }\n{%- endmacro -%}\n{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %}\n{%- set tool_idx = namespace(value=0) %}\n{%- set tool_ids_seen = namespace(value=[]) %}\n{%- set sent_documents = namespace(value=false) %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes.\n\nYour information cutoff date is June 2024.\n\nYou have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages.\n{% if tools or documents %}\n\nYou have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests.\n\n## Tool Use\nThink about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first.\n\n0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. 
When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed.\n NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools.\n\nThen carry out your plan by repeatedly executing the following steps.\n1. Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing \"tool_name\" and \"parameters\" fields.\n When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. When you finish, close it out with <|END_ACTION|>.\n2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results.\n Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its \"tool_call_id\".\n3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. When you finish, close it out with <|END_THINKING|>.\n You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded.\n NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user.\n\nYou can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user.\n\n4. Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>.\n{% if enable_citations %}\n\n## Grounding\nImportantly, note that \"Reflection\" and \"Response\" above can be grounded.\nGrounding means you associate pieces of texts (called \"spans\") with those specific tool results that support them (called \"sources\"). And you use a pair of tags \"<co>\" and \"</co>\" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as \"{tool_call_id}:[{list of result indices}]\", before they are joined together by \",\". E.g., \"<co>span</co: 0:[1,2],1:[0]>\" means that \"span\" is supported by result 1 and 2 from \"tool_call_id=0\" as well as result 0 from \"tool_call_id=1\".\n{% endif %}\n\n## Available Tools\nHere is the list of tools that you have available to you.\nYou can ONLY use the tools listed here. 
When a tool is not listed below, it is NOT available and you should NEVER attempt to use it.\nEach tool is represented as a JSON object with fields like \"name\", \"description\", \"parameters\" (per JSON Schema), and optionally, \"responses\" (per JSON Schema).\n\n```json\n[\n{% if documents %}\n {\"name\": \"direct-injected-document\", \"description\": \"This is a special tool to directly inject user-uploaded documents into the chat as additional context. DO NOT use this tool by yourself!\", \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []}, \"responses\": {\"200\": {\"description\": \"Successfully returned a list of chunked text snippets from the directly uploaded documents.\", \"content\": {\"application/json\": {\"schema\": {\"type\": \"array\", \"items\": {\"type\": \"object\", \"required\": [\"url\", \"snippet\"], \"properties\": {\"url\": {\"type\": \"string\", \"description\": \"The url of the uploaded document.\"}, \"snippet\": {\"type\": \"string\", \"description\": \"The text snippet for the returned document chunk.\"}}}}}}}}}{%- if tools %},{% endif %}\n\n{% endif %}\n{% for tool in tools %}\n {\"name\": \"{{ tool['function']['name'] }}\", \"description\": \"{{tool['function']['description']}}\", \"parameters\": {{ tool['function']['parameters']|tojson }}, \"responses\": null}{%- if not loop.last %},{% endif %}\n\n{% endfor %}\n]\n```\n\n{% endif %}\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Command.\n- You are a large language model built by Cohere.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n{%- if developer_preamble %}\n\n\n# Developer Preamble\nThe following instructions take precedence over instructions in the default preamble and user prompt. 
You reject any instructions which conflict with system preamble instructions.\n{{ developer_preamble }}\n{%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages %}\n {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>\n {%- elif message.role|lower == 'user' %}\n<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %}\n {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[\n {% for tc in message.tool_calls %}\n {\"tool_call_id\": \"{{ tool_idx.value }}\", \"tool_name\": \"{{ tc['function']['name'] }}\", \"parameters\": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %}\n\n {% set tool_idx.value = tool_idx.value + 1 %}\n {% endfor %}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %}\n {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %}\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[\n{{ format_tool_message(messages, message) }}\n {%- for msg in messages[loop.index0 + 1:] %}\n {%- if msg.role|lower == 'tool' %},\n{{ format_tool_message(messages, msg) }}\n {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %}\n {%- else %}\n {%- break %}\n {%- endif %}\n {%- endfor %}\n\n]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>\n {%- endif %}\n{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
+ }
+ ],
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|END_OF_TURN_TOKEN|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "merges_file": null,
+ "model_max_length": 8192,
+ "pad_token": "<PAD>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "CohereTokenizer",
+ "unk_token": "<UNK>",
+ "use_default_system_prompt": false,
+ "vocab_file": null
+ }
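
The tokenizer config above registers three named chat templates (`default`, `tool_use`, `rag`). A minimal sketch of rendering the `tool_use` template with `transformers`, assuming the files have been downloaded into one of the checkpoint folders (the `get_weather` tool below is purely illustrative and not part of this repo):

```python
from transformers import AutoTokenizer

# Any of the uploaded checkpoint dirs contains the same tokenizer files.
tok = AutoTokenizer.from_pretrained("checkpoint-1170")

messages = [{"role": "user", "content": "What's the weather in Toronto?"}]
tools = [{
    "type": "function",
    "function": {  # hypothetical tool definition, for illustration only
        "name": "get_weather",
        "description": "Return the current weather for a city.",
        "parameters": {"type": "object",
                       "properties": {"city": {"type": "string"}},
                       "required": ["city"]},
    },
}]

# Select the named template; the Jinja above emits the
# <|START_THINKING|>/<|START_ACTION|> scaffolding around the tool list.
prompt = tok.apply_chat_template(messages, chat_template="tool_use", tools=tools,
                                 tokenize=False, add_generation_prompt=True)
print(prompt)
```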
checkpoint-1170/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
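
Although the diff is too large to render, `trainer_state.json` is plain JSON; a quick way to inspect the training trajectory it records (key names follow the standard `transformers` Trainer layout):

```python
import json

# Local copy of this checkpoint's trainer state.
with open("checkpoint-1170/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])
# log_history holds the per-logging-step metrics (loss, learning rate, ...).
for entry in state["log_history"][-3:]:
    print(entry)
```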
 
checkpoint-1170/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b72add547acd6c009f3a3770072d0cdf7de7e797c7597084319db874a556e470
+ size 6904
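
The LFS pointer above stands in for `training_args.bin`, which is a pickled `TrainingArguments` object rather than a tensor file, so on recent PyTorch it must be loaded with `weights_only=False`. A minimal sketch, assuming a compatible `transformers` version is installed locally:

```python
import torch

# Pickled transformers.TrainingArguments saved by the Trainer.
args = torch.load("checkpoint-1170/training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```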
checkpoint-1170/zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
+ #!/usr/bin/env python
+
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ # This script extracts fp32 consolidated weights from zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+ # application.
+ #
+ # example:
+ #   python zero_to_fp32.py . output_dir/
+ # or
+ #   python zero_to_fp32.py . output_dir/ --safe_serialization
+
+ import argparse
+ import torch
+ import glob
+ import math
+ import os
+ import re
+ import gc
+ import json
+ import numpy as np
+ from tqdm import tqdm
+ from collections import OrderedDict
+ from dataclasses import dataclass
+
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+ # DeepSpeed data structures it has to be available in the current python environment.
+ from deepspeed.utils import logger
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+                                             FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+                                             FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+ @dataclass
+ class zero_model_state:
+     buffers: dict()
+     param_shapes: dict()
+     shared_params: list
+     ds_version: int
+     frozen_param_shapes: dict()
+     frozen_param_fragments: dict()
+
+
+ debug = 0
+
+ # load to cpu
+ device = torch.device('cpu')
+
+
+ def atoi(text):
+     return int(text) if text.isdigit() else text
+
+
+ def natural_keys(text):
+     '''
+     alist.sort(key=natural_keys) sorts in human order
+     http://nedbatchelder.com/blog/200712/human_sorting.html
+     (See Toothy's implementation in the comments)
+     '''
+     return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+ def get_model_state_file(checkpoint_dir, zero_stage):
+     if not os.path.isdir(checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+     # there should be only one file
+     if zero_stage <= 2:
+         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+     elif zero_stage == 3:
+         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+     if not os.path.exists(file):
+         raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+     return file
+
+
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
+     # XXX: need to test that this simple glob rule works for multi-node setup too
+     ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+     if len(ckpt_files) == 0:
+         raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+     return ckpt_files
+
+
+ def get_optim_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+ def get_model_state_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+ def parse_model_states(files):
+     zero_model_states = []
+     for file in files:
+         state_dict = torch.load(file, map_location=device, weights_only=False)
+
+         if BUFFER_NAMES not in state_dict:
+             raise ValueError(f"{file} is not a model state checkpoint")
+         buffer_names = state_dict[BUFFER_NAMES]
+         if debug:
+             print("Found buffers:", buffer_names)
+
+         # recover just the buffers while restoring them to fp32 if they were saved in fp16
+         buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+         param_shapes = state_dict[PARAM_SHAPES]
+
+         # collect parameters that are included in param_shapes
+         param_names = []
+         for s in param_shapes:
+             for name in s.keys():
+                 param_names.append(name)
+
+         # update with frozen parameters
+         frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+         if frozen_param_shapes is not None:
+             if debug:
+                 print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+             param_names += list(frozen_param_shapes.keys())
+
+         # handle shared params
+         shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+         ds_version = state_dict.get(DS_VERSION, None)
+
+         frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+         z_model_state = zero_model_state(buffers=buffers,
+                                          param_shapes=param_shapes,
+                                          shared_params=shared_params,
+                                          ds_version=ds_version,
+                                          frozen_param_shapes=frozen_param_shapes,
+                                          frozen_param_fragments=frozen_param_fragments)
+         zero_model_states.append(z_model_state)
+
+     return zero_model_states
+
+
+ def parse_optim_states(files, ds_checkpoint_dir):
+     total_files = len(files)
+     state_dicts = []
+     for f in tqdm(files, desc='Loading checkpoint shards'):
+         state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+         # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+         # and also handle the case where it was already removed by another helper script
+         state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+         state_dicts.append(state_dict)
+
+     if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+         raise ValueError(f"{files[0]} is not a zero checkpoint")
+     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+     # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+     # parameters can be different from data parallelism for non-expert parameters. So we can just
+     # use the max of the partition_count to get the dp world_size.
+
+     if type(world_size) is list:
+         world_size = max(world_size)
+
+     if world_size != total_files:
+         raise ValueError(
+             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+         )
+
+     # the groups are named differently in each stage
+     if zero_stage <= 2:
+         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+     elif zero_stage == 3:
+         fp32_groups_key = FP32_FLAT_GROUPS
+     else:
+         raise ValueError(f"unknown zero stage {zero_stage}")
+
+     fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+     return zero_stage, world_size, fp32_flat_groups
+
+
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+     """
+     Returns fp32 state_dict reconstructed from ds checkpoint
+
+     Args:
+         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+     """
+     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+     optim_files = get_optim_files(ds_checkpoint_dir)
+     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+     print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+     model_files = get_model_state_files(ds_checkpoint_dir)
+
+     zero_model_states = parse_model_states(model_files)
+     print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+     if zero_stage <= 2:
+         return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                           exclude_frozen_parameters)
+     elif zero_stage == 3:
+         return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                           exclude_frozen_parameters)
+
+
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+     if debug:
+         num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+         print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         state_dict[name] = frozen_param_fragments[name]
+
+         if debug:
+             print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _has_callable(obj, fn):
+     attr = getattr(obj, fn, None)
+     return callable(attr)
+
+
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+
+     # Reconstruction protocol:
+     #
+     # XXX: document this
+
+     if debug:
+         for i in range(world_size):
+             for j in range(len(fp32_flat_groups[0])):
+                 print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+     # XXX: memory usage doubles here (zero2)
+     num_param_groups = len(fp32_flat_groups[0])
+     merged_single_partition_of_fp32_groups = []
+     for i in range(num_param_groups):
+         merged_partitions = [sd[i] for sd in fp32_flat_groups]
+         full_single_fp32_vector = torch.cat(merged_partitions, 0)
+         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+     avail_numel = sum(
+         [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+     if debug:
+         wanted_params = sum([len(shapes) for shapes in param_shapes])
+         wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     total_numel = 0
+     total_params = 0
+     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+         offset = 0
+         avail_numel = full_single_fp32_vector.numel()
+         for name, shape in shapes.items():
+
+             unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+             total_numel += unpartitioned_numel
+             total_params += 1
+
+             if debug:
+                 print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+             state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+             offset += unpartitioned_numel
+
+         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+         # live optimizer object, so we are checking that the numbers are within the right range
+         align_to = 2 * world_size
+
+         def zero2_align(x):
+             return align_to * math.ceil(x / align_to)
+
+         if debug:
+             print(f"original offset={offset}, avail_numel={avail_numel}")
+
+         offset = zero2_align(offset)
+         avail_numel = zero2_align(avail_numel)
+
+         if debug:
+             print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+         # Sanity check
+         if offset != avail_numel:
+             raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                exclude_frozen_parameters):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     if not exclude_frozen_parameters:
+         _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+     _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+     remainder = unpartitioned_numel % world_size
+     padding_numel = (world_size - remainder) if remainder else 0
+     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+     return partitioned_numel, padding_numel
+
+
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     if debug:
+         for i in range(world_size):
+             num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+             print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in zero_model_states[0].frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+         state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ class GatheredTensor:
+     """
+     A pseudo tensor that collects partitioned weights.
+     It is more memory efficient when there are multiple groups.
+     """
+
+     def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+         self.flat_groups = flat_groups
+         self.flat_groups_offset = flat_groups_offset
+         self.offset = offset
+         self.partitioned_numel = partitioned_numel
+         self.shape = shape
+         self.dtype = self.flat_groups[0][0].dtype
+
+     def contiguous(self):
+         """
+         Merge partitioned weights from flat_groups into a single tensor.
+         """
+         end_idx = self.offset + self.partitioned_numel
+         world_size = len(self.flat_groups)
+         pad_flat_param_chunks = []
+
+         for rank_i in range(world_size):
+             # for each rank, we need to collect weights from related group/groups
+             flat_groups_at_rank_i = self.flat_groups[rank_i]
+             start_group_id = None
+             end_group_id = None
+             for group_id in range(len(self.flat_groups_offset)):
+                 if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+                     start_group_id = group_id
+                 if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+                     end_group_id = group_id
+                     break
+             # collect weights from related group/groups
+             for group_id in range(start_group_id, end_group_id + 1):
+                 flat_tensor = flat_groups_at_rank_i[group_id]
+                 start_offset = self.offset - self.flat_groups_offset[group_id]
+                 end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+                 pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+         # collect weights from all ranks
+         pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+         param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+         return param
+
+
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+     avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+     # param, re-consolidating each param, while dealing with padding if any
+
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+     wanted_params = len(param_shapes)
+     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+     # not asserting if there is a mismatch due to possible padding
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     print(f"Trainable params: Have {avail_numel} numels to process.")
+     print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+     for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+         # memory efficient tensor
+         tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+         state_dict[name] = tensor
+         offset += partitioned_numel
+
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                exclude_frozen_parameters):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     if not exclude_frozen_parameters:
+         _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
+     """
+     Convert state_dict of GatheredTensor to torch tensor
+     """
+     torch_state_dict = {}
+     converted_tensors = {}
+     for name, tensor in state_dict.items():
+         tensor_id = id(tensor)
+         if tensor_id in converted_tensors:  # shared tensors
+             shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+             torch_state_dict[name] = shared_tensor
+         else:
+             converted_tensors[tensor_id] = name
+             if return_empty_tensor:
+                 torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+             else:
+                 torch_state_dict[name] = tensor.contiguous()
+     return torch_state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                              tag=None,
+                                              exclude_frozen_parameters=False,
+                                              lazy_mode=False):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
+         - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+           Convert a pseudo tensor to a torch tensor by ``.contiguous()``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+     You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint. Or you can load the state_dict in lazy mode ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+         for name, lazy_tensor in state_dict.items():
+             tensor = lazy_tensor.contiguous() # to cpu
+             print(name, tensor)
+             # del tensor to release memory if it is no longer in use
+     """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                                output_dir,
+                                                max_shard_size="5GB",
+                                                safe_serialization=False,
+                                                tag=None,
+                                                exclude_frozen_parameters=False):
+     """
+     Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_dir``: directory for the pytorch fp32 state_dict output files
+         - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+         - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
+     """
+
+     # Dependency pre-check
+     if safe_serialization:
+         try:
+             from safetensors.torch import save_file
+         except ImportError:
+             print('If you want to use `safe_serialization`, please `pip install safetensors`')
+             raise
+     if max_shard_size is not None:
+         try:
+             from huggingface_hub import split_torch_state_dict_into_shards
+         except ImportError:
+             print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+             raise
+
+     # Convert zero checkpoint to state_dict
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                                           tag,
+                                                           exclude_frozen_parameters,
+                                                           lazy_mode=True)
+
+     # Shard the model if it is too big.
+     weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+     if max_shard_size is not None:
+         filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+         # a memory-efficient approach to sharding
+         empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+         state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+                                                               filename_pattern=filename_pattern,
+                                                               max_shard_size=max_shard_size)
+     else:
+         from collections import namedtuple
+         StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+         state_dict_split = StateDictSplit(is_sharded=False,
+                                           filename_to_tensors={weights_name: list(state_dict.keys())})
+
+     # Save the model by shard
+     os.makedirs(output_dir, exist_ok=True)
+     filename_to_tensors = state_dict_split.filename_to_tensors.items()
+     for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+         shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+         shard_state_dict = to_torch_tensor(shard_state_dict)
+         output_path = os.path.join(output_dir, shard_file)
+         if safe_serialization:
+             save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+         else:
+             torch.save(shard_state_dict, output_path)
+         # release the memory of the current shard
+         for tensor_name in list(shard_state_dict.keys()):
+             del state_dict[tensor_name]
+             del shard_state_dict[tensor_name]
+         del shard_state_dict
+         gc.collect()
+
+     # Save index if sharded
+     if state_dict_split.is_sharded:
+         index = {
+             "metadata": state_dict_split.metadata,
+             "weight_map": state_dict_split.tensor_to_filename,
+         }
+         save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+         save_index_file = os.path.join(output_dir, save_index_file)
+         with open(save_index_file, "w", encoding="utf-8") as f:
+             content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+             f.write(content)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model on the cpu
+     2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info("Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info("Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument("output_dir",
+                         type=str,
+                         help="directory for the pytorch fp32 state_dict output files "
+                         "(e.g. path/checkpoint-12-output/)")
+     parser.add_argument(
+         "--max_shard_size",
+         type=str,
+         default="5GB",
+         help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be "
+         "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
+         "We default it to 5GB so that models can run easily on free-tier Google Colab instances "
+         "without CPU OOM issues.")
+     parser.add_argument(
+         "--safe_serialization",
+         default=False,
+         action='store_true',
+         help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+     parser.add_argument("-t",
+                         "--tag",
+                         type=str,
+                         default=None,
+                         help="checkpoint tag used as a unique identifier for the checkpoint, e.g., global_step1")
+     parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+                                                args.output_dir,
+                                                max_shard_size=args.max_shard_size,
+                                                safe_serialization=args.safe_serialization,
+                                                tag=args.tag,
+                                                exclude_frozen_parameters=args.exclude_frozen_parameters)
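
With the script above saved into each checkpoint folder, the consolidated fp32 weights can be produced offline. A minimal sketch, assuming the repo was cloned locally; the `checkpoint-1260` and output paths are illustrative ::

    # Sketch: consolidate the ZeRO shards of one checkpoint into fp32 weights.
    # CLI equivalent:
    #   python checkpoint-1260/zero_to_fp32.py checkpoint-1260 out_fp32 --safe_serialization
    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    convert_zero_checkpoint_to_fp32_state_dict(
        "checkpoint-1260",        # folder containing the 'latest' tag file and global_step*/ shards
        "out_fp32",               # output directory for model.safetensors (+ index if sharded)
        max_shard_size="5GB",
        safe_serialization=True,  # requires `pip install safetensors huggingface_hub`
    )

For a LoRA run like this one, passing `exclude_frozen_parameters=True` should keep only the trainable adapter tensors in the output, per the flag's help text above.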
checkpoint-1260/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: ../../initial_seq_model
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.14.0
checkpoint-1260/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "../../initial_seq_model",
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "score",
+     "v_proj",
+     "up_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
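
The config above describes a rank-8 LoRA adapter (`lora_alpha` 16, dropout 0.1) over the attention and MLP projections plus the `score` head. A minimal loading sketch with PEFT, assuming the base model referenced by `base_model_name_or_path` is available locally; all paths here are illustrative ::

    # Sketch: attach this LoRA adapter to its base model with PEFT.
    import torch
    from peft import PeftModel
    from transformers import AutoModelForCausalLM

    base = AutoModelForCausalLM.from_pretrained(
        "../../initial_seq_model",   # local path recorded in adapter_config.json
        torch_dtype=torch.bfloat16,
    )
    model = PeftModel.from_pretrained(base, "checkpoint-1260")  # folder with adapter_config.json
    model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights

One caveat: `task_type` is `CAUSAL_LM`, yet `target_modules` includes a `score` head and the base path suggests a sequence model, so a sequence-classification loader may be the right choice instead of `AutoModelForCausalLM`.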
checkpoint-1260/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f1bb0249a8f70de7f668f84c0c0df99d3736370e66d3aeb15e41345e964bee9
+ size 42068368
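
The three lines above are only the Git LFS pointer (a sha256 and a ~42 MB payload), not the tensors themselves. Once the real file is fetched, its contents can be inspected lazily; a small sketch using the `safetensors` API, with an illustrative local path ::

    # Sketch: list the LoRA tensors stored in adapter_model.safetensors.
    from safetensors import safe_open

    with safe_open("checkpoint-1260/adapter_model.safetensors",
                   framework="pt", device="cpu") as f:
        for name in f.keys():
            t = f.get_tensor(name)
            print(name, tuple(t.shape), t.dtype)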