m-ric committed
Commit e69e988 · verified · 1 Parent(s): acafb90

Model save
Files changed (4)
  1. README.md +2 -4
  2. all_results.json +5 -5
  3. train_results.json +5 -5
  4. trainer_state.json +36 -178
README.md CHANGED
@@ -1,11 +1,9 @@
 ---
 base_model: HuggingFaceTB/SmolLM2-1.7B-Instruct
-datasets: smolagents/training-traces
 library_name: transformers
 model_name: OpenR1-SmolLM2-1.7B-Instruct-Agentic
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - sft
 licence: license
@@ -13,7 +11,7 @@ licence: license
 
 # Model Card for OpenR1-SmolLM2-1.7B-Instruct-Agentic
 
-This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-1.7B-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct) on the [smolagents/training-traces](https://huggingface.co/datasets/smolagents/training-traces) dataset.
+This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-1.7B-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -29,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/huggingface/huggingface/runs/y4n5o68t)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/huggingface/huggingface/runs/obusc7q3)
 
 
 This model was trained with SFT.
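The README's Quick start section is untouched by this commit; only its hunk context line, `print(output["generated_text"])`, appears above. For reference, a minimal sketch of that usage, assuming the standard TRL model-card template and the repo id `m-ric/OpenR1-SmolLM2-1.7B-Instruct-Agentic` (both are assumptions, not shown in this diff):

```python
# Sketch of the Quick start usage; the repo id and prompt are assumed, not taken from the diff.
from transformers import pipeline

question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="m-ric/OpenR1-SmolLM2-1.7B-Instruct-Agentic", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```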
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "total_flos": 30936649433088.0,
-    "train_loss": 0.0,
-    "train_runtime": 1.1169,
+    "total_flos": 6155761876992.0,
+    "train_loss": 1.0376410086949666,
+    "train_runtime": 169.1619,
     "train_samples": 1204,
-    "train_samples_per_second": 1259.682,
-    "train_steps_per_second": 18.801
+    "train_samples_per_second": 8.317,
+    "train_steps_per_second": 0.124
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "total_flos": 30936649433088.0,
-    "train_loss": 0.0,
-    "train_runtime": 1.1169,
+    "total_flos": 6155761876992.0,
+    "train_loss": 1.0376410086949666,
+    "train_runtime": 169.1619,
     "train_samples": 1204,
-    "train_samples_per_second": 1259.682,
-    "train_steps_per_second": 18.801
+    "train_samples_per_second": 8.317,
+    "train_steps_per_second": 0.124
 }
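The updated numbers in both results files line up with the new trainer_state.json below: 21 optimizer steps over 169.16 s is roughly 0.124 steps per second. A small sanity-check sketch, assuming the repo-local JSON file paths:

```python
# Cross-check the reported throughput against trainer_state.json; file paths are assumptions.
import json

with open("train_results.json") as f:
    results = json.load(f)
with open("trainer_state.json") as f:
    state = json.load(f)

# 21 / 169.1619 ≈ 0.124, matching "train_steps_per_second" in this commit.
derived = state["global_step"] / results["train_runtime"]
print(f'reported={results["train_steps_per_second"]}  derived={derived:.3f}')
```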
trainer_state.json CHANGED
@@ -2,201 +2,59 @@
   "best_global_step": null,
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 10.0,
+  "epoch": 2.6666666666666665,
   "eval_steps": 500,
-  "global_step": 100,
+  "global_step": 21,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 1.8,
-      "grad_norm": 0.45298852266312745,
-      "learning_rate": 1e-05,
-      "loss": 1.2979,
-      "mean_token_accuracy": 0.7309286197026571,
-      "num_tokens": 1946617.0,
-      "step": 5
-    },
-    {
-      "epoch": 1.0,
-      "grad_norm": 0.40654940219848923,
-      "learning_rate": 5e-05,
-      "loss": 1.2213,
-      "mean_token_accuracy": 0.7395898401737213,
-      "num_tokens": 2027951.0,
-      "step": 10
-    },
-    {
-      "epoch": 1.5,
-      "grad_norm": 0.2847470811451423,
-      "learning_rate": 4.722222222222222e-05,
-      "loss": 1.0874,
-      "mean_token_accuracy": 0.7629910290241242,
-      "num_tokens": 4554224.0,
-      "step": 15
-    },
-    {
-      "epoch": 2.0,
-      "grad_norm": 0.18223384267847015,
+      "epoch": 0.6666666666666666,
+      "grad_norm": 0.3659521589282829,
       "learning_rate": 4.4444444444444447e-05,
-      "loss": 0.9862,
-      "mean_token_accuracy": 0.7828312337398529,
-      "num_tokens": 7110068.0,
-      "step": 20
-    },
-    {
-      "epoch": 2.5,
-      "grad_norm": 0.14795383975219814,
-      "learning_rate": 4.166666666666667e-05,
-      "loss": 0.9164,
-      "mean_token_accuracy": 0.7957836747169494,
-      "num_tokens": 9668383.0,
-      "step": 25
-    },
-    {
-      "epoch": 3.0,
-      "grad_norm": 0.13696459830104152,
-      "learning_rate": 3.888888888888889e-05,
-      "loss": 0.8717,
-      "mean_token_accuracy": 0.8046842753887177,
-      "num_tokens": 12192185.0,
-      "step": 30
-    },
-    {
-      "epoch": 3.5,
-      "grad_norm": 0.11364616589196232,
-      "learning_rate": 3.611111111111111e-05,
-      "loss": 0.8405,
-      "mean_token_accuracy": 0.8104514122009278,
-      "num_tokens": 14727805.0,
-      "step": 35
-    },
-    {
-      "epoch": 4.0,
-      "grad_norm": 0.11796516723586992,
-      "learning_rate": 3.3333333333333335e-05,
-      "loss": 0.7997,
-      "mean_token_accuracy": 0.8168916702270508,
-      "num_tokens": 17274302.0,
-      "step": 40
+      "loss": 1.1898,
+      "mean_token_accuracy": 0.749215167760849,
+      "num_tokens": 2476935.0,
+      "step": 5
     },
     {
-      "epoch": 4.5,
-      "grad_norm": 0.11842645074111845,
+      "epoch": 1.2666666666666666,
+      "grad_norm": 0.31543356085932234,
       "learning_rate": 3.055555555555556e-05,
-      "loss": 0.7551,
-      "mean_token_accuracy": 0.8266302287578583,
-      "num_tokens": 19783780.0,
-      "step": 45
-    },
-    {
-      "epoch": 5.0,
-      "grad_norm": 0.10511642047034364,
-      "learning_rate": 2.777777777777778e-05,
-      "loss": 0.7741,
-      "mean_token_accuracy": 0.8223041534423828,
-      "num_tokens": 22356419.0,
-      "step": 50
-    },
-    {
-      "epoch": 5.5,
-      "grad_norm": 0.09869455025230396,
-      "learning_rate": 2.5e-05,
-      "loss": 0.7275,
-      "mean_token_accuracy": 0.8318577527999877,
-      "num_tokens": 24887270.0,
-      "step": 55
-    },
-    {
-      "epoch": 6.0,
-      "grad_norm": 0.10222368245808963,
-      "learning_rate": 2.2222222222222223e-05,
-      "loss": 0.7091,
-      "mean_token_accuracy": 0.835788244009018,
-      "num_tokens": 27438536.0,
-      "step": 60
-    },
-    {
-      "epoch": 6.5,
-      "grad_norm": 0.10330660110696606,
-      "learning_rate": 1.9444444444444445e-05,
-      "loss": 0.6933,
-      "mean_token_accuracy": 0.8389557778835297,
-      "num_tokens": 29966273.0,
-      "step": 65
+      "loss": 1.0408,
+      "mean_token_accuracy": 0.773130026128557,
+      "num_tokens": 4779069.0,
+      "step": 10
     },
     {
-      "epoch": 7.0,
-      "grad_norm": 0.09938432484517981,
+      "epoch": 1.9333333333333333,
+      "grad_norm": 0.23778923448881445,
       "learning_rate": 1.6666666666666667e-05,
-      "loss": 0.6679,
-      "mean_token_accuracy": 0.8443207144737244,
-      "num_tokens": 32520653.0,
-      "step": 70
-    },
-    {
-      "epoch": 7.5,
-      "grad_norm": 0.10806152124483398,
-      "learning_rate": 1.388888888888889e-05,
-      "loss": 0.659,
-      "mean_token_accuracy": 0.8463097810745239,
-      "num_tokens": 35077955.0,
-      "step": 75
-    },
-    {
-      "epoch": 8.0,
-      "grad_norm": 0.10203259245493806,
-      "learning_rate": 1.1111111111111112e-05,
-      "loss": 0.6433,
-      "mean_token_accuracy": 0.8495967864990235,
-      "num_tokens": 37602770.0,
-      "step": 80
-    },
-    {
-      "epoch": 8.5,
-      "grad_norm": 0.10380131304938053,
-      "learning_rate": 8.333333333333334e-06,
-      "loss": 0.6315,
-      "mean_token_accuracy": 0.8518205523490906,
-      "num_tokens": 40123096.0,
-      "step": 85
-    },
-    {
-      "epoch": 9.0,
-      "grad_norm": 0.10787578796789908,
-      "learning_rate": 5.555555555555556e-06,
-      "loss": 0.6285,
-      "mean_token_accuracy": 0.8533558428287507,
-      "num_tokens": 42684887.0,
-      "step": 90
+      "loss": 0.9778,
+      "mean_token_accuracy": 0.7825889229774475,
+      "num_tokens": 7274168.0,
+      "step": 15
     },
     {
-      "epoch": 9.5,
-      "grad_norm": 0.10352907202385529,
+      "epoch": 2.533333333333333,
+      "grad_norm": 0.19692051437026512,
       "learning_rate": 2.777777777777778e-06,
-      "loss": 0.6161,
-      "mean_token_accuracy": 0.8554697334766388,
-      "num_tokens": 45235842.0,
-      "step": 95
-    },
-    {
-      "epoch": 10.0,
-      "grad_norm": 0.10763838091671823,
-      "learning_rate": 0.0,
-      "loss": 0.6202,
-      "mean_token_accuracy": 0.8551790714263916,
-      "num_tokens": 47767004.0,
-      "step": 100
+      "loss": 0.9744,
+      "mean_token_accuracy": 0.7865516278478835,
+      "num_tokens": 9521856.0,
+      "step": 20
     },
     {
-      "epoch": 10.0,
-      "step": 100,
-      "total_flos": 30936649433088.0,
-      "train_loss": 0.0,
-      "train_runtime": 1.1169,
-      "train_samples_per_second": 1259.682,
-      "train_steps_per_second": 18.801
+      "epoch": 2.6666666666666665,
+      "mean_token_accuracy": 0.7911556959152222,
+      "num_tokens": 10020101.0,
+      "step": 21,
+      "total_flos": 6155761876992.0,
+      "train_loss": 1.0376410086949666,
+      "train_runtime": 169.1619,
+      "train_samples_per_second": 8.317,
+      "train_steps_per_second": 0.124
     }
   ],
   "logging_steps": 5,
@@ -216,7 +74,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 30936649433088.0,
+  "total_flos": 6155761876992.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null