zhuangxialie commited on
Commit
0cb498a
·
verified ·
1 Parent(s): 8ab053c

Model save

Browse files
README.md ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ model_name: Qwen-code-7B-SFT-100k-v2-cot-v5
4
+ tags:
5
+ - generated_from_trainer
6
+ - trl
7
+ - sft
8
+ license: license
9
+ ---
10
+
11
+ # Model Card for Qwen-code-7B-SFT-100k-v2-cot-v5
12
+
13
+ This model is a fine-tuned version of an unspecified base model (the base-model reference was not recorded in the training metadata — confirm against the training configuration).
14
+ It has been trained using [TRL](https://github.com/huggingface/trl).
15
+
16
+ ## Quick start
17
+
18
+ ```python
19
+ from transformers import pipeline
20
+
21
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
22
+ generator = pipeline("text-generation", model="ZhuangXialie/Qwen-code-7B-SFT-100k-v2-cot-v5", device="cuda")
23
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
24
+ print(output["generated_text"])
25
+ ```
26
+
27
+ ## Training procedure
28
+
29
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/dyx_team/huggingface/runs/b55zzbcb)
30
+
31
+
32
+ This model was trained with SFT.
33
+
34
+ ### Framework versions
35
+
36
+ - TRL: 0.17.0.dev0
37
+ - Transformers: 4.51.2
38
+ - Pytorch: 2.6.0
39
+ - Datasets: 3.5.1
40
+ - Tokenizers: 0.21.1
41
+
42
+ ## Citations
43
+
44
+
45
+
46
+ Cite TRL as:
47
+
48
+ ```bibtex
49
+ @misc{vonwerra2022trl,
50
+ title = {{TRL: Transformer Reinforcement Learning}},
51
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
52
+ year = 2020,
53
+ journal = {GitHub repository},
54
+ publisher = {GitHub},
55
+ howpublished = {\url{https://github.com/huggingface/trl}}
56
+ }
57
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 5.6666267347582976e+17,
3
+ "train_loss": 0.04020713732367562,
4
+ "train_runtime": 749.347,
5
+ "train_samples": 9428,
6
+ "train_samples_per_second": 2.637,
7
+ "train_steps_per_second": 0.165
8
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 151645,
6
+ 151643
7
+ ],
8
+ "pad_token_id": 151643,
9
+ "repetition_penalty": 1.1,
10
+ "temperature": 0.7,
11
+ "top_k": 20,
12
+ "top_p": 0.8,
13
+ "transformers_version": "4.51.2"
14
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 5.6666267347582976e+17,
3
+ "train_loss": 0.04020713732367562,
4
+ "train_runtime": 749.347,
5
+ "train_samples": 9428,
6
+ "train_samples_per_second": 2.637,
7
+ "train_steps_per_second": 0.165
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 4.0,
6
+ "eval_steps": 500,
7
+ "global_step": 124,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.16129032258064516,
14
+ "grad_norm": 1.9339179610924326,
15
+ "learning_rate": 1.5384615384615387e-05,
16
+ "loss": 0.2023,
17
+ "mean_token_accuracy": 0.9648663103580475,
18
+ "num_tokens": 1200802.0,
19
+ "step": 5
20
+ },
21
+ {
22
+ "epoch": 0.3225806451612903,
23
+ "grad_norm": 0.20186157671436705,
24
+ "learning_rate": 3.461538461538462e-05,
25
+ "loss": 0.0907,
26
+ "mean_token_accuracy": 0.979491549730301,
27
+ "num_tokens": 2329939.0,
28
+ "step": 10
29
+ },
30
+ {
31
+ "epoch": 0.4838709677419355,
32
+ "grad_norm": 0.11105863000001426,
33
+ "learning_rate": 4.999098891016224e-05,
34
+ "loss": 0.062,
35
+ "mean_token_accuracy": 0.9859958469867707,
36
+ "num_tokens": 3454242.0,
37
+ "step": 15
38
+ },
39
+ {
40
+ "epoch": 0.6451612903225806,
41
+ "grad_norm": 0.07295180708170615,
42
+ "learning_rate": 4.967635798282344e-05,
43
+ "loss": 0.058,
44
+ "mean_token_accuracy": 0.9865534484386445,
45
+ "num_tokens": 4636563.0,
46
+ "step": 20
47
+ },
48
+ {
49
+ "epoch": 0.8064516129032258,
50
+ "grad_norm": 0.061430086599508354,
51
+ "learning_rate": 4.8918364374405345e-05,
52
+ "loss": 0.0517,
53
+ "mean_token_accuracy": 0.9883540272712708,
54
+ "num_tokens": 5780954.0,
55
+ "step": 25
56
+ },
57
+ {
58
+ "epoch": 0.967741935483871,
59
+ "grad_norm": 0.06636359184947321,
60
+ "learning_rate": 4.773216233515633e-05,
61
+ "loss": 0.0523,
62
+ "mean_token_accuracy": 0.9879270672798157,
63
+ "num_tokens": 6973730.0,
64
+ "step": 30
65
+ },
66
+ {
67
+ "epoch": 1.129032258064516,
68
+ "grad_norm": 0.05553079305562554,
69
+ "learning_rate": 4.614146710798645e-05,
70
+ "loss": 0.0441,
71
+ "mean_token_accuracy": 0.9883361339569092,
72
+ "num_tokens": 8106792.0,
73
+ "step": 35
74
+ },
75
+ {
76
+ "epoch": 1.2903225806451613,
77
+ "grad_norm": 0.05845614425520945,
78
+ "learning_rate": 4.417808079950151e-05,
79
+ "loss": 0.0444,
80
+ "mean_token_accuracy": 0.9887041985988617,
81
+ "num_tokens": 9259980.0,
82
+ "step": 40
83
+ },
84
+ {
85
+ "epoch": 1.4516129032258065,
86
+ "grad_norm": 0.05205247747909711,
87
+ "learning_rate": 4.188125657374216e-05,
88
+ "loss": 0.0389,
89
+ "mean_token_accuracy": 0.990032184123993,
90
+ "num_tokens": 10469264.0,
91
+ "step": 45
92
+ },
93
+ {
94
+ "epoch": 1.6129032258064515,
95
+ "grad_norm": 0.06709443615050471,
96
+ "learning_rate": 3.929691388003772e-05,
97
+ "loss": 0.0419,
98
+ "mean_token_accuracy": 0.989645528793335,
99
+ "num_tokens": 11528973.0,
100
+ "step": 50
101
+ },
102
+ {
103
+ "epoch": 1.7741935483870968,
104
+ "grad_norm": 0.049780936243340075,
105
+ "learning_rate": 3.647672040459687e-05,
106
+ "loss": 0.0308,
107
+ "mean_token_accuracy": 0.9919266939163208,
108
+ "num_tokens": 12714383.0,
109
+ "step": 55
110
+ },
111
+ {
112
+ "epoch": 1.935483870967742,
113
+ "grad_norm": 0.05777914245949305,
114
+ "learning_rate": 3.347705909999472e-05,
115
+ "loss": 0.0345,
116
+ "mean_token_accuracy": 0.9910649061203003,
117
+ "num_tokens": 13828224.0,
118
+ "step": 60
119
+ },
120
+ {
121
+ "epoch": 2.096774193548387,
122
+ "grad_norm": 0.0763532245250884,
123
+ "learning_rate": 3.0357900944304774e-05,
124
+ "loss": 0.0298,
125
+ "mean_token_accuracy": 0.9922283053398132,
126
+ "num_tokens": 14895428.0,
127
+ "step": 65
128
+ },
129
+ {
130
+ "epoch": 2.258064516129032,
131
+ "grad_norm": 0.0478737582615227,
132
+ "learning_rate": 2.7181605966332857e-05,
133
+ "loss": 0.0249,
134
+ "mean_token_accuracy": 0.9926450073719024,
135
+ "num_tokens": 16001285.0,
136
+ "step": 70
137
+ },
138
+ {
139
+ "epoch": 2.4193548387096775,
140
+ "grad_norm": 0.059113621264175666,
141
+ "learning_rate": 2.4011676507555546e-05,
142
+ "loss": 0.0231,
143
+ "mean_token_accuracy": 0.9934819102287292,
144
+ "num_tokens": 17101738.0,
145
+ "step": 75
146
+ },
147
+ {
148
+ "epoch": 2.5806451612903225,
149
+ "grad_norm": 0.0666372605177016,
150
+ "learning_rate": 2.0911487646277623e-05,
151
+ "loss": 0.0232,
152
+ "mean_token_accuracy": 0.9933121263980865,
153
+ "num_tokens": 18330596.0,
154
+ "step": 80
155
+ },
156
+ {
157
+ "epoch": 2.741935483870968,
158
+ "grad_norm": 0.06526290734512033,
159
+ "learning_rate": 1.7943020166108926e-05,
160
+ "loss": 0.0252,
161
+ "mean_token_accuracy": 0.9927115201950073,
162
+ "num_tokens": 19460189.0,
163
+ "step": 85
164
+ },
165
+ {
166
+ "epoch": 2.903225806451613,
167
+ "grad_norm": 0.06160045585242219,
168
+ "learning_rate": 1.5165621399994035e-05,
169
+ "loss": 0.0227,
170
+ "mean_token_accuracy": 0.9932076632976532,
171
+ "num_tokens": 20686498.0,
172
+ "step": 90
173
+ },
174
+ {
175
+ "epoch": 3.064516129032258,
176
+ "grad_norm": 0.05412733531697612,
177
+ "learning_rate": 1.2634818723723174e-05,
178
+ "loss": 0.0215,
179
+ "mean_token_accuracy": 0.9935298144817353,
180
+ "num_tokens": 21800001.0,
181
+ "step": 95
182
+ },
183
+ {
184
+ "epoch": 3.225806451612903,
185
+ "grad_norm": 0.057286274686510985,
186
+ "learning_rate": 1.0401209420254312e-05,
187
+ "loss": 0.0123,
188
+ "mean_token_accuracy": 0.9964357137680053,
189
+ "num_tokens": 23023063.0,
190
+ "step": 100
191
+ },
192
+ {
193
+ "epoch": 3.3870967741935485,
194
+ "grad_norm": 0.06710958627001332,
195
+ "learning_rate": 8.509449109326118e-06,
196
+ "loss": 0.0121,
197
+ "mean_token_accuracy": 0.9962392687797547,
198
+ "num_tokens": 24175633.0,
199
+ "step": 105
200
+ },
201
+ {
202
+ "epoch": 3.5483870967741935,
203
+ "grad_norm": 0.051561033179333415,
204
+ "learning_rate": 6.9973589662669455e-06,
205
+ "loss": 0.0129,
206
+ "mean_token_accuracy": 0.9961234986782074,
207
+ "num_tokens": 25391505.0,
208
+ "step": 110
209
+ },
210
+ {
211
+ "epoch": 3.709677419354839,
212
+ "grad_norm": 0.07447115032041235,
213
+ "learning_rate": 5.895169579001987e-06,
214
+ "loss": 0.0137,
215
+ "mean_token_accuracy": 0.9959305047988891,
216
+ "num_tokens": 26515619.0,
217
+ "step": 115
218
+ },
219
+ {
220
+ "epoch": 3.870967741935484,
221
+ "grad_norm": 0.05952950063195277,
222
+ "learning_rate": 5.224916560510316e-06,
223
+ "loss": 0.0133,
224
+ "mean_token_accuracy": 0.9959637641906738,
225
+ "num_tokens": 27647513.0,
226
+ "step": 120
227
+ },
228
+ {
229
+ "epoch": 4.0,
230
+ "mean_token_accuracy": 0.996899887919426,
231
+ "num_tokens": 28505880.0,
232
+ "step": 124,
233
+ "total_flos": 5.6666267347582976e+17,
234
+ "train_loss": 0.04020713732367562,
235
+ "train_runtime": 749.347,
236
+ "train_samples_per_second": 2.637,
237
+ "train_steps_per_second": 0.165
238
+ }
239
+ ],
240
+ "logging_steps": 5,
241
+ "max_steps": 124,
242
+ "num_input_tokens_seen": 0,
243
+ "num_train_epochs": 4,
244
+ "save_steps": 1000,
245
+ "stateful_callbacks": {
246
+ "TrainerControl": {
247
+ "args": {
248
+ "should_epoch_stop": false,
249
+ "should_evaluate": false,
250
+ "should_log": false,
251
+ "should_save": true,
252
+ "should_training_stop": true
253
+ },
254
+ "attributes": {}
255
+ }
256
+ },
257
+ "total_flos": 5.6666267347582976e+17,
258
+ "train_batch_size": 1,
259
+ "trial_name": null,
260
+ "trial_params": null
261
+ }